// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;

	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	int				nr_mapped;
	struct msghdr			recv_msg;
	struct kvec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;

	/* send state */
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	int			cpu;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*snd_hash;
	struct ahash_request	*rcv_hash;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage	addr;
	int			last_cpu;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
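
/*
 * Note: the transfer tag (ttag) carried in R2T PDUs is simply the command's
 * index into the queue's cmds array, so nvmet_tcp_handle_h2c_data_pdu() can
 * look the command up again with a plain queue->cmds[data->ttag] access.
 */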
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
	void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	sg = &cmd->req.sg[cmd->sg_idx];

	for (i = 0; i < cmd->nr_mapped; i++)
		kunmap(sg_page(&sg[i]));
}

static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct kvec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;

	length = cmd->pdu_len;
	cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
		iov->iov_len = iov_len;

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
	}

	iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
		cmd->nr_mapped, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!cmd->req.data_len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	sgl_free(cmd->req.sg);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;

	node = llist_del_all(&queue->resp_list);
	if (!node)
		return;

	while (node) {
		struct nvmet_tcp_cmd *cmd = llist_entry(node,
					struct nvmet_tcp_cmd, lentry);

		list_add(&cmd->entry, &queue->resp_send_list);
		node = node->next;
		queue->send_list_len++;
	}
}

static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}
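
/*
 * Completions may be queued from any context, so nvmet_tcp_queue_response()
 * only pushes the command onto the lock-free resp_list llist and kicks
 * io_work; the io_work context later splices resp_list onto resp_send_list
 * (see nvmet_tcp_process_resp_list()) and transmits from there, keeping all
 * send-side list manipulation on the queue's own CPU.
 */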
static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
}

static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
			offset_in_page(cmd->data_pdu) + cmd->offset,
			left, MSG_DONTWAIT | MSG_MORE);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->state = NVMET_TCP_SEND_DATA;
	cmd->offset = 0;
	return 1;
}

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct page *page = sg_page(cmd->cur_sg);
		u32 left = cmd->cur_sg->length - cmd->offset;

		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
					left, MSG_DONTWAIT | MSG_MORE);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled) {
		kfree(cmd->iov);
		sgl_free(cmd->req.sg);
	}

	return 1;

}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = &cmd->exp_ddgst + cmd->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
	};
	int ret;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (ret <= 0)
			break;
		(*sends)++;
	}

	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}


static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(0xffff); /* FIXME: support r2t */
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_crypto;

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
free_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	return ret;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	int ret;

	/* recover the expected data transfer length */
	req->data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);

	if (!nvme_is_write(cmd->req.cmd) ||
	    req->data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_map_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;

	cmd = &queue->cmds[data->ttag];

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		/* FIXME: use path and transport errors */
		nvmet_req_complete(&cmd->req,
			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
		return -EPROTO;
	}

	cmd->pdu_len = le32_to_cpu(data->data_length);
	cmd->pdu_recv = 0;
	nvmet_tcp_map_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}

static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return -EAGAIN;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_map_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send back R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	nvmet_req_execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}

static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len;
	struct kvec iov;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}
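
/*
 * When data digests are negotiated, the expected CRC32C over the command's
 * data SGL is computed as soon as all of its data has been received, and the
 * queue then switches to NVMET_TCP_RECV_DDGST to read the 4-byte digest the
 * host appended and compare it in nvmet_tcp_try_recv_ddgst().
 */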
static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	nvmet_tcp_unmap_pdu_iovec(cmd);

	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
	    cmd->rbytes_done == cmd->req.transfer_len) {
		if (queue->data_digest) {
			nvmet_tcp_prep_recv_ddgst(cmd);
			return 0;
		}
		nvmet_req_execute(&cmd->req);
	}

	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	queue->left -= ret;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_tcp_finish_cmd(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
	    cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_req_execute(&cmd->req);
	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (ret <= 0)
			break;
		(*recvs)++;
	}

	return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock(&queue->state_lock);
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		schedule_work(&queue->release_work);
	}
	spin_unlock(&queue->state_lock);
}
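
/*
 * Per-queue worker: alternate between receive and send processing, each
 * capped by NVMET_TCP_RECV_BUDGET / NVMET_TCP_SEND_BUDGET, and keep looping
 * while either side made progress, up to NVMET_TCP_IO_WORK_BUDGET operations.
 * If work is still pending at that point, re-queue the worker on the same
 * CPU instead of monopolizing the workqueue.
 */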
static void nvmet_tcp_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, io_work);
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
		if (ret > 0) {
			pending = true;
		} else if (ret < 0) {
			if (ret == -EPIPE || ret == -ECONNRESET)
				kernel_sock_shutdown(queue->sock, SHUT_RDWR);
			else
				nvmet_tcp_fatal_error(queue);
			return;
		}

		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
		if (ret > 0) {
			/* transmitted message/data */
			pending = true;
		} else if (ret < 0) {
			if (ret == -EPIPE || ret == -ECONNRESET)
				kernel_sock_shutdown(queue->sock, SHUT_RDWR);
			else
				nvmet_tcp_fatal_error(queue);
			return;
		}

	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/*
	 * We exhausted our budget, requeue ourselves
	 */
	if (pending)
		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
}

static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);

	c->queue = queue;
	c->req.port = queue->port->nport;

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		goto out_free_cmd;
	c->req.cqe = &c->rsp_pdu->cqe;

	c->data_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->data_pdu)
		goto out_free_rsp;

	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->r2t_pdu)
		goto out_free_data;

	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

	list_add_tail(&c->entry, &queue->free_list);

	return 0;
out_free_data:
	page_frag_free(c->data_pdu);
out_free_rsp:
	page_frag_free(c->rsp_pdu);
out_free_cmd:
	page_frag_free(c->cmd_pdu);
	return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
	page_frag_free(c->r2t_pdu);
	page_frag_free(c->data_pdu);
	page_frag_free(c->rsp_pdu);
	page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds;
	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
		if (ret)
			goto out_free;
	}

	queue->cmds = cmds;

	return 0;
out_free:
	while (--i >= 0)
		nvmet_tcp_free_cmd(cmds + i);
	kfree(cmds);
out:
	return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++)
		nvmet_tcp_free_cmd(cmds + i);

	nvmet_tcp_free_cmd(&queue->connect);
	kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
	nvmet_req_uninit(&cmd->req);
	nvmet_tcp_unmap_pdu_iovec(cmd);
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_tcp_finish_cmd(cmd);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
		/* failed in connect */
		nvmet_tcp_finish_cmd(&queue->connect);
	}
}

static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	nvmet_tcp_restore_socket_callbacks(queue);
	flush_work(&queue->io_work);

	nvmet_tcp_uninit_data_in_cmds(queue);
	nvmet_sq_destroy(&queue->nvme_sq);
	cancel_work_sync(&queue->io_work);
	sock_release(queue->sock);
	nvmet_tcp_free_cmds(queue);
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);

	kfree(queue);
}

static void nvmet_tcp_data_ready(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue))
		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (unlikely(!queue))
		goto out;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		queue->write_space(sk);
		goto out;
	}

	if (sk_stream_is_writeable(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	write_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_FIN_WAIT1:
	case TCP_CLOSE_WAIT:
	case TCP_CLOSE:
		/* FALLTHRU */
		sk->sk_user_data = NULL;
		nvmet_tcp_schedule_release_queue(queue);
		break;
	default:
		pr_warn("queue %d unhandled state %d\n",
			queue->idx, sk->sk_state);
	}
done:
	write_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct inet_sock *inet = inet_sk(sock->sk);
	struct linger sol = { .l_onoff = 1, .l_linger = 0 };
	int ret;

	ret = kernel_getsockname(sock,
		(struct sockaddr *)&queue->sockaddr);
	if (ret < 0)
		return ret;

	ret = kernel_getpeername(sock,
		(struct sockaddr *)&queue->sockaddr_peer);
	if (ret < 0)
		return ret;

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
			(char *)&sol, sizeof(sol));
	if (ret)
		return ret;

	/* Set socket type of service */
	if (inet->rcv_tos > 0) {
		int tos = inet->rcv_tos;

		ret = kernel_setsockopt(sock, SOL_IP, IP_TOS,
				(char *)&tos, sizeof(tos));
		if (ret)
			return ret;
	}

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = queue;
	queue->data_ready = sock->sk->sk_data_ready;
	sock->sk->sk_data_ready = nvmet_tcp_data_ready;
	queue->state_change = sock->sk->sk_state_change;
	sock->sk->sk_state_change = nvmet_tcp_state_change;
	queue->write_space = sock->sk->sk_write_space;
	sock->sk->sk_write_space = nvmet_tcp_write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	return 0;
}

static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
		struct socket *newsock)
{
	struct nvmet_tcp_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
	queue->sock = newsock;
	queue->port = port;
	queue->nr_cmds = 0;
	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_TCP_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->free_list);
	init_llist_head(&queue->resp_list);
	INIT_LIST_HEAD(&queue->resp_send_list);

	queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = queue->idx;
		goto out_free_queue;
	}

	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
	if (ret)
		goto out_ida_remove;

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_free_connect;

	port->last_cpu = cpumask_next_wrap(port->last_cpu,
				cpu_online_mask, -1, false);
	queue->cpu = port->last_cpu;
	nvmet_prepare_receive_pdu(queue);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	ret = nvmet_tcp_set_queue_sock(queue);
	if (ret)
		goto out_destroy_sq;

	queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);

	return 0;
out_destroy_sq:
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
	nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
	kfree(queue);
	return ret;
}

static void nvmet_tcp_accept_work(struct work_struct *w)
{
	struct nvmet_tcp_port *port =
		container_of(w, struct nvmet_tcp_port, accept_work);
	struct socket *newsock;
	int ret;

	while (true) {
		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
		if (ret < 0) {
			if (ret != -EAGAIN)
				pr_warn("failed to accept err=%d\n", ret);
			return;
		}
		ret = nvmet_tcp_alloc_queue(port, newsock);
		if (ret) {
			pr_err("failed to allocate queue\n");
			sock_release(newsock);
		}
	}
}
static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
	struct nvmet_tcp_port *port;

	read_lock_bh(&sk->sk_callback_lock);
	port = sk->sk_user_data;
	if (!port)
		goto out;

	if (sk->sk_state == TCP_LISTEN)
		schedule_work(&port->accept_work);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port;
	__kernel_sa_family_t af;
	int opt, ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	port->last_cpu = -1;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;

	opt = 1;
	ret = kernel_setsockopt(port->sock, IPPROTO_TCP,
			TCP_NODELAY, (char *)&opt, sizeof(opt));
	if (ret) {
		pr_err("failed to set TCP_NODELAY sock opt %d\n", ret);
		goto err_sock;
	}

	ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_REUSEADDR,
			(char *)&opt, sizeof(opt));
	if (ret) {
		pr_err("failed to set SO_REUSEADDR sock opt %d\n", ret);
		goto err_sock;
	}

	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, 128);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);

	sock_release(port->sock);
	kfree(port);
}
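
/*
 * Controller teardown only shuts down the sockets of the queues that belong
 * to the controller; the resulting TCP state change runs
 * nvmet_tcp_state_change(), which schedules nvmet_tcp_release_queue_work()
 * to do the actual queue destruction.
 */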
static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->nvme_sq.ctrl == ctrl)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
	struct nvmet_tcp_queue *queue =
		container_of(sq, struct nvmet_tcp_queue, nvme_sq);

	if (sq->qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	queue->nr_cmds = sq->size * 2;
	if (nvmet_tcp_alloc_cmds(queue))
		return NVME_SC_INTERNAL;
	return 0;
}

static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_tcp_port *port = nport->priv;

	if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
		struct nvmet_tcp_cmd *cmd =
			container_of(req, struct nvmet_tcp_cmd, req);
		struct nvmet_tcp_queue *queue = cmd->queue;

		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static struct nvmet_fabrics_ops nvmet_tcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_TCP,
	.msdbd			= 1,
	.has_keyed_sgls		= 0,
	.add_port		= nvmet_tcp_add_port,
	.remove_port		= nvmet_tcp_remove_port,
	.queue_response		= nvmet_tcp_queue_response,
	.delete_ctrl		= nvmet_tcp_delete_ctrl,
	.install_queue		= nvmet_tcp_install_queue,
	.disc_traddr		= nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
	int ret;

	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
	if (!nvmet_tcp_wq)
		return -ENOMEM;

	ret = nvmet_register_transport(&nvmet_tcp_ops);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvmet_tcp_wq);
	return ret;
}

static void __exit nvmet_tcp_exit(void)
{
	struct nvmet_tcp_queue *queue;

	nvmet_unregister_transport(&nvmet_tcp_ops);

	flush_scheduled_work();
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	flush_scheduled_work();

	destroy_workqueue(nvmet_tcp_wq);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */