1 /* 2 * NVMe over Fabrics RDMA host code. 3 * Copyright (c) 2015-2016 HGST, a Western Digital Company. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 */ 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 #include <linux/module.h> 16 #include <linux/init.h> 17 #include <linux/slab.h> 18 #include <linux/err.h> 19 #include <linux/string.h> 20 #include <linux/atomic.h> 21 #include <linux/blk-mq.h> 22 #include <linux/blk-mq-rdma.h> 23 #include <linux/types.h> 24 #include <linux/list.h> 25 #include <linux/mutex.h> 26 #include <linux/scatterlist.h> 27 #include <linux/nvme.h> 28 #include <asm/unaligned.h> 29 30 #include <rdma/ib_verbs.h> 31 #include <rdma/rdma_cm.h> 32 #include <linux/nvme-rdma.h> 33 34 #include "nvme.h" 35 #include "fabrics.h" 36 37 38 #define NVME_RDMA_CONNECT_TIMEOUT_MS 3000 /* 3 second */ 39 40 #define NVME_RDMA_MAX_SEGMENT_SIZE 0xffffff /* 24-bit SGL field */ 41 42 #define NVME_RDMA_MAX_SEGMENTS 256 43 44 #define NVME_RDMA_MAX_INLINE_SEGMENTS 1 45 46 /* 47 * We handle AEN commands ourselves and don't even let the 48 * block layer know about them. 49 */ 50 #define NVME_RDMA_NR_AEN_COMMANDS 1 51 #define NVME_RDMA_AQ_BLKMQ_DEPTH \ 52 (NVME_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS) 53 54 struct nvme_rdma_device { 55 struct ib_device *dev; 56 struct ib_pd *pd; 57 struct kref ref; 58 struct list_head entry; 59 }; 60 61 struct nvme_rdma_qe { 62 struct ib_cqe cqe; 63 void *data; 64 u64 dma; 65 }; 66 67 struct nvme_rdma_queue; 68 struct nvme_rdma_request { 69 struct nvme_request req; 70 struct ib_mr *mr; 71 struct nvme_rdma_qe sqe; 72 struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS]; 73 u32 num_sge; 74 int nents; 75 bool inline_data; 76 struct ib_reg_wr reg_wr; 77 struct ib_cqe reg_cqe; 78 struct nvme_rdma_queue *queue; 79 struct sg_table sg_table; 80 struct scatterlist first_sgl[]; 81 }; 82 83 enum nvme_rdma_queue_flags { 84 NVME_RDMA_Q_LIVE = 0, 85 NVME_RDMA_Q_DELETING = 1, 86 }; 87 88 struct nvme_rdma_queue { 89 struct nvme_rdma_qe *rsp_ring; 90 atomic_t sig_count; 91 int queue_size; 92 size_t cmnd_capsule_len; 93 struct nvme_rdma_ctrl *ctrl; 94 struct nvme_rdma_device *device; 95 struct ib_cq *ib_cq; 96 struct ib_qp *qp; 97 98 unsigned long flags; 99 struct rdma_cm_id *cm_id; 100 int cm_error; 101 struct completion cm_done; 102 }; 103 104 struct nvme_rdma_ctrl { 105 /* read only in the hot path */ 106 struct nvme_rdma_queue *queues; 107 108 /* other member variables */ 109 struct blk_mq_tag_set tag_set; 110 struct work_struct delete_work; 111 struct work_struct err_work; 112 113 struct nvme_rdma_qe async_event_sqe; 114 115 struct delayed_work reconnect_work; 116 117 struct list_head list; 118 119 struct blk_mq_tag_set admin_tag_set; 120 struct nvme_rdma_device *device; 121 122 u32 max_fr_pages; 123 124 struct sockaddr_storage addr; 125 struct sockaddr_storage src_addr; 126 127 struct nvme_ctrl ctrl; 128 }; 129 130 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl) 131 { 132 return container_of(ctrl, struct nvme_rdma_ctrl, ctrl); 133 } 134 135 static LIST_HEAD(device_list); 136 static DEFINE_MUTEX(device_list_mutex); 137 138 static 
LIST_HEAD(nvme_rdma_ctrl_list);
static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);

/*
 * Disabling this option makes small I/O go faster, but is fundamentally
 * unsafe.  With it turned off we will have to register a global rkey that
 * allows read and write access to all physical memory.
 */
static bool register_always = true;
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
	 "Use memory registration even for contiguous memory regions");

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);

/* XXX: really should move to a generic header sooner or later.. */
static inline void put_unaligned_le24(u32 val, u8 *p)
{
	*p++ = val;
	*p++ = val >> 8;
	*p++ = val >> 16;
}

static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
	kfree(qe->data);
}

static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	qe->data = kzalloc(capsule_size, GFP_KERNEL);
	if (!qe->data)
		return -ENOMEM;

	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
	if (ib_dma_mapping_error(ibdev, qe->dma)) {
		kfree(qe->data);
		return -ENOMEM;
	}

	return 0;
}

static void nvme_rdma_free_ring(struct ib_device *ibdev,
		struct nvme_rdma_qe *ring, size_t ib_queue_size,
		size_t capsule_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ib_queue_size; i++)
		nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
	kfree(ring);
}

static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
		size_t ib_queue_size, size_t capsule_size,
		enum dma_data_direction dir)
{
	struct nvme_rdma_qe *ring;
	int i;

	ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
	if (!ring)
		return NULL;

	for (i = 0; i < ib_queue_size; i++) {
		if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
			goto out_free_ring;
	}

	return ring;

out_free_ring:
	nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
	return NULL;
}

static void nvme_rdma_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
	wait_for_completion_interruptible_timeout(&queue->cm_done,
			msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
	return queue->cm_error;
}

static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
{
	struct nvme_rdma_device *dev = queue->device;
	struct ib_qp_init_attr init_attr;
	int ret;

	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.event_handler = nvme_rdma_qp_event;
	/* +1 for drain */
	init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
	/* +1 for drain
*/ 255 init_attr.cap.max_recv_wr = queue->queue_size + 1; 256 init_attr.cap.max_recv_sge = 1; 257 init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS; 258 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 259 init_attr.qp_type = IB_QPT_RC; 260 init_attr.send_cq = queue->ib_cq; 261 init_attr.recv_cq = queue->ib_cq; 262 263 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); 264 265 queue->qp = queue->cm_id->qp; 266 return ret; 267 } 268 269 static int nvme_rdma_reinit_request(void *data, struct request *rq) 270 { 271 struct nvme_rdma_ctrl *ctrl = data; 272 struct nvme_rdma_device *dev = ctrl->device; 273 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 274 int ret = 0; 275 276 ib_dereg_mr(req->mr); 277 278 req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG, 279 ctrl->max_fr_pages); 280 if (IS_ERR(req->mr)) { 281 ret = PTR_ERR(req->mr); 282 req->mr = NULL; 283 goto out; 284 } 285 286 req->mr->need_inval = false; 287 288 out: 289 return ret; 290 } 291 292 static void nvme_rdma_exit_request(struct blk_mq_tag_set *set, 293 struct request *rq, unsigned int hctx_idx) 294 { 295 struct nvme_rdma_ctrl *ctrl = set->driver_data; 296 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 297 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; 298 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; 299 struct nvme_rdma_device *dev = queue->device; 300 301 if (req->mr) 302 ib_dereg_mr(req->mr); 303 304 nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command), 305 DMA_TO_DEVICE); 306 } 307 308 static int nvme_rdma_init_request(struct blk_mq_tag_set *set, 309 struct request *rq, unsigned int hctx_idx, 310 unsigned int numa_node) 311 { 312 struct nvme_rdma_ctrl *ctrl = set->driver_data; 313 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 314 int queue_idx = (set == &ctrl->tag_set) ? 
hctx_idx + 1 : 0; 315 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; 316 struct nvme_rdma_device *dev = queue->device; 317 struct ib_device *ibdev = dev->dev; 318 int ret; 319 320 ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), 321 DMA_TO_DEVICE); 322 if (ret) 323 return ret; 324 325 req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG, 326 ctrl->max_fr_pages); 327 if (IS_ERR(req->mr)) { 328 ret = PTR_ERR(req->mr); 329 goto out_free_qe; 330 } 331 332 req->queue = queue; 333 334 return 0; 335 336 out_free_qe: 337 nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command), 338 DMA_TO_DEVICE); 339 return -ENOMEM; 340 } 341 342 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 343 unsigned int hctx_idx) 344 { 345 struct nvme_rdma_ctrl *ctrl = data; 346 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; 347 348 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); 349 350 hctx->driver_data = queue; 351 return 0; 352 } 353 354 static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, 355 unsigned int hctx_idx) 356 { 357 struct nvme_rdma_ctrl *ctrl = data; 358 struct nvme_rdma_queue *queue = &ctrl->queues[0]; 359 360 BUG_ON(hctx_idx != 0); 361 362 hctx->driver_data = queue; 363 return 0; 364 } 365 366 static void nvme_rdma_free_dev(struct kref *ref) 367 { 368 struct nvme_rdma_device *ndev = 369 container_of(ref, struct nvme_rdma_device, ref); 370 371 mutex_lock(&device_list_mutex); 372 list_del(&ndev->entry); 373 mutex_unlock(&device_list_mutex); 374 375 ib_dealloc_pd(ndev->pd); 376 kfree(ndev); 377 } 378 379 static void nvme_rdma_dev_put(struct nvme_rdma_device *dev) 380 { 381 kref_put(&dev->ref, nvme_rdma_free_dev); 382 } 383 384 static int nvme_rdma_dev_get(struct nvme_rdma_device *dev) 385 { 386 return kref_get_unless_zero(&dev->ref); 387 } 388 389 static struct nvme_rdma_device * 390 nvme_rdma_find_get_device(struct rdma_cm_id *cm_id) 391 { 392 struct nvme_rdma_device *ndev; 393 394 mutex_lock(&device_list_mutex); 395 list_for_each_entry(ndev, &device_list, entry) { 396 if (ndev->dev->node_guid == cm_id->device->node_guid && 397 nvme_rdma_dev_get(ndev)) 398 goto out_unlock; 399 } 400 401 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); 402 if (!ndev) 403 goto out_err; 404 405 ndev->dev = cm_id->device; 406 kref_init(&ndev->ref); 407 408 ndev->pd = ib_alloc_pd(ndev->dev, 409 register_always ? 
0 : IB_PD_UNSAFE_GLOBAL_RKEY); 410 if (IS_ERR(ndev->pd)) 411 goto out_free_dev; 412 413 if (!(ndev->dev->attrs.device_cap_flags & 414 IB_DEVICE_MEM_MGT_EXTENSIONS)) { 415 dev_err(&ndev->dev->dev, 416 "Memory registrations not supported.\n"); 417 goto out_free_pd; 418 } 419 420 list_add(&ndev->entry, &device_list); 421 out_unlock: 422 mutex_unlock(&device_list_mutex); 423 return ndev; 424 425 out_free_pd: 426 ib_dealloc_pd(ndev->pd); 427 out_free_dev: 428 kfree(ndev); 429 out_err: 430 mutex_unlock(&device_list_mutex); 431 return NULL; 432 } 433 434 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) 435 { 436 struct nvme_rdma_device *dev; 437 struct ib_device *ibdev; 438 439 dev = queue->device; 440 ibdev = dev->dev; 441 rdma_destroy_qp(queue->cm_id); 442 ib_free_cq(queue->ib_cq); 443 444 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, 445 sizeof(struct nvme_completion), DMA_FROM_DEVICE); 446 447 nvme_rdma_dev_put(dev); 448 } 449 450 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) 451 { 452 struct ib_device *ibdev; 453 const int send_wr_factor = 3; /* MR, SEND, INV */ 454 const int cq_factor = send_wr_factor + 1; /* + RECV */ 455 int comp_vector, idx = nvme_rdma_queue_idx(queue); 456 int ret; 457 458 queue->device = nvme_rdma_find_get_device(queue->cm_id); 459 if (!queue->device) { 460 dev_err(queue->cm_id->device->dev.parent, 461 "no client data found!\n"); 462 return -ECONNREFUSED; 463 } 464 ibdev = queue->device->dev; 465 466 /* 467 * Spread I/O queues completion vectors according their queue index. 468 * Admin queues can always go on completion vector 0. 469 */ 470 comp_vector = idx == 0 ? idx : idx - 1; 471 472 /* +1 for ib_stop_cq */ 473 queue->ib_cq = ib_alloc_cq(ibdev, queue, 474 cq_factor * queue->queue_size + 1, 475 comp_vector, IB_POLL_SOFTIRQ); 476 if (IS_ERR(queue->ib_cq)) { 477 ret = PTR_ERR(queue->ib_cq); 478 goto out_put_dev; 479 } 480 481 ret = nvme_rdma_create_qp(queue, send_wr_factor); 482 if (ret) 483 goto out_destroy_ib_cq; 484 485 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size, 486 sizeof(struct nvme_completion), DMA_FROM_DEVICE); 487 if (!queue->rsp_ring) { 488 ret = -ENOMEM; 489 goto out_destroy_qp; 490 } 491 492 return 0; 493 494 out_destroy_qp: 495 ib_destroy_qp(queue->qp); 496 out_destroy_ib_cq: 497 ib_free_cq(queue->ib_cq); 498 out_put_dev: 499 nvme_rdma_dev_put(queue->device); 500 return ret; 501 } 502 503 static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl, 504 int idx, size_t queue_size) 505 { 506 struct nvme_rdma_queue *queue; 507 struct sockaddr *src_addr = NULL; 508 int ret; 509 510 queue = &ctrl->queues[idx]; 511 queue->ctrl = ctrl; 512 init_completion(&queue->cm_done); 513 514 if (idx > 0) 515 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; 516 else 517 queue->cmnd_capsule_len = sizeof(struct nvme_command); 518 519 queue->queue_size = queue_size; 520 atomic_set(&queue->sig_count, 0); 521 522 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, 523 RDMA_PS_TCP, IB_QPT_RC); 524 if (IS_ERR(queue->cm_id)) { 525 dev_info(ctrl->ctrl.device, 526 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); 527 return PTR_ERR(queue->cm_id); 528 } 529 530 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) 531 src_addr = (struct sockaddr *)&ctrl->src_addr; 532 533 queue->cm_error = -ETIMEDOUT; 534 ret = rdma_resolve_addr(queue->cm_id, src_addr, 535 (struct sockaddr *)&ctrl->addr, 536 NVME_RDMA_CONNECT_TIMEOUT_MS); 537 if (ret) { 538 dev_info(ctrl->ctrl.device, 539 
"rdma_resolve_addr failed (%d).\n", ret); 540 goto out_destroy_cm_id; 541 } 542 543 ret = nvme_rdma_wait_for_cm(queue); 544 if (ret) { 545 dev_info(ctrl->ctrl.device, 546 "rdma_resolve_addr wait failed (%d).\n", ret); 547 goto out_destroy_cm_id; 548 } 549 550 clear_bit(NVME_RDMA_Q_DELETING, &queue->flags); 551 552 return 0; 553 554 out_destroy_cm_id: 555 rdma_destroy_id(queue->cm_id); 556 return ret; 557 } 558 559 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) 560 { 561 rdma_disconnect(queue->cm_id); 562 ib_drain_qp(queue->qp); 563 } 564 565 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) 566 { 567 nvme_rdma_destroy_queue_ib(queue); 568 rdma_destroy_id(queue->cm_id); 569 } 570 571 static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue) 572 { 573 if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags)) 574 return; 575 nvme_rdma_stop_queue(queue); 576 nvme_rdma_free_queue(queue); 577 } 578 579 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) 580 { 581 int i; 582 583 for (i = 1; i < ctrl->ctrl.queue_count; i++) 584 nvme_rdma_stop_and_free_queue(&ctrl->queues[i]); 585 } 586 587 static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl) 588 { 589 int i, ret = 0; 590 591 for (i = 1; i < ctrl->ctrl.queue_count; i++) { 592 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 593 if (ret) { 594 dev_info(ctrl->ctrl.device, 595 "failed to connect i/o queue: %d\n", ret); 596 goto out_free_queues; 597 } 598 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); 599 } 600 601 return 0; 602 603 out_free_queues: 604 nvme_rdma_free_io_queues(ctrl); 605 return ret; 606 } 607 608 static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) 609 { 610 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 611 struct ib_device *ibdev = ctrl->device->dev; 612 unsigned int nr_io_queues; 613 int i, ret; 614 615 nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); 616 617 /* 618 * we map queues according to the device irq vectors for 619 * optimal locality so we don't need more queues than 620 * completion vectors. 
621 */ 622 nr_io_queues = min_t(unsigned int, nr_io_queues, 623 ibdev->num_comp_vectors); 624 625 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 626 if (ret) 627 return ret; 628 629 ctrl->ctrl.queue_count = nr_io_queues + 1; 630 if (ctrl->ctrl.queue_count < 2) 631 return 0; 632 633 dev_info(ctrl->ctrl.device, 634 "creating %d I/O queues.\n", nr_io_queues); 635 636 for (i = 1; i < ctrl->ctrl.queue_count; i++) { 637 ret = nvme_rdma_init_queue(ctrl, i, 638 ctrl->ctrl.opts->queue_size); 639 if (ret) { 640 dev_info(ctrl->ctrl.device, 641 "failed to initialize i/o queue: %d\n", ret); 642 goto out_free_queues; 643 } 644 } 645 646 return 0; 647 648 out_free_queues: 649 for (i--; i >= 1; i--) 650 nvme_rdma_stop_and_free_queue(&ctrl->queues[i]); 651 652 return ret; 653 } 654 655 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl) 656 { 657 nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe, 658 sizeof(struct nvme_command), DMA_TO_DEVICE); 659 nvme_rdma_stop_and_free_queue(&ctrl->queues[0]); 660 blk_cleanup_queue(ctrl->ctrl.admin_q); 661 blk_mq_free_tag_set(&ctrl->admin_tag_set); 662 nvme_rdma_dev_put(ctrl->device); 663 } 664 665 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) 666 { 667 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); 668 669 if (list_empty(&ctrl->list)) 670 goto free_ctrl; 671 672 mutex_lock(&nvme_rdma_ctrl_mutex); 673 list_del(&ctrl->list); 674 mutex_unlock(&nvme_rdma_ctrl_mutex); 675 676 kfree(ctrl->queues); 677 nvmf_free_options(nctrl->opts); 678 free_ctrl: 679 kfree(ctrl); 680 } 681 682 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) 683 { 684 /* If we are resetting/deleting then do nothing */ 685 if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) { 686 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || 687 ctrl->ctrl.state == NVME_CTRL_LIVE); 688 return; 689 } 690 691 if (nvmf_should_reconnect(&ctrl->ctrl)) { 692 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n", 693 ctrl->ctrl.opts->reconnect_delay); 694 queue_delayed_work(nvme_wq, &ctrl->reconnect_work, 695 ctrl->ctrl.opts->reconnect_delay * HZ); 696 } else { 697 dev_info(ctrl->ctrl.device, "Removing controller...\n"); 698 queue_work(nvme_wq, &ctrl->delete_work); 699 } 700 } 701 702 static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) 703 { 704 struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work), 705 struct nvme_rdma_ctrl, reconnect_work); 706 bool changed; 707 int ret; 708 709 ++ctrl->ctrl.nr_reconnects; 710 711 if (ctrl->ctrl.queue_count > 1) { 712 nvme_rdma_free_io_queues(ctrl); 713 714 ret = blk_mq_reinit_tagset(&ctrl->tag_set, 715 nvme_rdma_reinit_request); 716 if (ret) 717 goto requeue; 718 } 719 720 nvme_rdma_stop_and_free_queue(&ctrl->queues[0]); 721 722 ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set, 723 nvme_rdma_reinit_request); 724 if (ret) 725 goto requeue; 726 727 ret = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH); 728 if (ret) 729 goto requeue; 730 731 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 732 if (ret) 733 goto requeue; 734 735 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); 736 737 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); 738 if (ret) 739 goto requeue; 740 741 if (ctrl->ctrl.queue_count > 1) { 742 ret = nvme_rdma_init_io_queues(ctrl); 743 if (ret) 744 goto requeue; 745 746 ret = nvme_rdma_connect_io_queues(ctrl); 747 if (ret) 748 goto requeue; 749 750 blk_mq_update_nr_hw_queues(&ctrl->tag_set, 751 ctrl->ctrl.queue_count - 1); 752 } 753 754 changed = 
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);
	ctrl->ctrl.nr_reconnects = 0;

	nvme_start_ctrl(&ctrl->ctrl);

	dev_info(ctrl->ctrl.device, "Successfully reconnected\n");

	return;

requeue:
	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
			ctrl->ctrl.nr_reconnects);
	nvme_rdma_reconnect_or_remove(ctrl);
}

static void nvme_rdma_error_recovery_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl = container_of(work,
			struct nvme_rdma_ctrl, err_work);
	int i;

	nvme_stop_ctrl(&ctrl->ctrl);

	for (i = 0; i < ctrl->ctrl.queue_count; i++)
		clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);

	if (ctrl->ctrl.queue_count > 1)
		nvme_stop_queues(&ctrl->ctrl);
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);

	/* We must fast-fail or requeue all of our inflight requests */
	if (ctrl->ctrl.queue_count > 1)
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);

	/*
	 * The queues are not live anymore, so restart them to fail fast
	 * on new I/O.
	 */
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_start_queues(&ctrl->ctrl);

	nvme_rdma_reconnect_or_remove(ctrl);
}

static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
		return;

	queue_work(nvme_wq, &ctrl->err_work);
}

static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
		const char *op)
{
	struct nvme_rdma_queue *queue = cq->cq_context;
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		dev_info(ctrl->ctrl.device,
			 "%s for CQE 0x%p failed with status %s (%d)\n",
			 op, wc->wr_cqe,
			 ib_wc_status_msg(wc->status), wc->status);
	nvme_rdma_error_recovery(ctrl);
}

static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "MEMREG");
}

static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
}

static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = req->mr->rkey,
	};

	req->reg_cqe.done = nvme_rdma_inv_rkey_done;
	wr.wr_cqe = &req->reg_cqe;

	return ib_post_send(queue->qp, &wr, &bad_wr);
}

static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
		struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
	struct nvme_rdma_device *dev = queue->device;
	struct ib_device *ibdev = dev->dev;
	int res;

	if (!blk_rq_bytes(rq))
		return;

	if (req->mr->need_inval) {
		res = nvme_rdma_inv_rkey(queue, req);
		if (res < 0) {
			dev_err(ctrl->ctrl.device,
				"Queueing INV WR for rkey %#x failed (%d)\n",
				req->mr->rkey, res);
			nvme_rdma_error_recovery(queue->ctrl);
		}
	}

	ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
			req->nents, rq_data_dir(rq) == WRITE ?
DMA_TO_DEVICE : DMA_FROM_DEVICE); 879 880 nvme_cleanup_cmd(rq); 881 sg_free_table_chained(&req->sg_table, true); 882 } 883 884 static int nvme_rdma_set_sg_null(struct nvme_command *c) 885 { 886 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; 887 888 sg->addr = 0; 889 put_unaligned_le24(0, sg->length); 890 put_unaligned_le32(0, sg->key); 891 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; 892 return 0; 893 } 894 895 static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, 896 struct nvme_rdma_request *req, struct nvme_command *c) 897 { 898 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; 899 900 req->sge[1].addr = sg_dma_address(req->sg_table.sgl); 901 req->sge[1].length = sg_dma_len(req->sg_table.sgl); 902 req->sge[1].lkey = queue->device->pd->local_dma_lkey; 903 904 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); 905 sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl)); 906 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; 907 908 req->inline_data = true; 909 req->num_sge++; 910 return 0; 911 } 912 913 static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue, 914 struct nvme_rdma_request *req, struct nvme_command *c) 915 { 916 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; 917 918 sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl)); 919 put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length); 920 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key); 921 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; 922 return 0; 923 } 924 925 static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, 926 struct nvme_rdma_request *req, struct nvme_command *c, 927 int count) 928 { 929 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; 930 int nr; 931 932 /* 933 * Align the MR to a 4K page size to match the ctrl page size and 934 * the block virtual boundary. 
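 * ib_map_mr_sg() below reports how many SG entries it could map; mapping
 * fewer than 'count' entries means the payload does not fit in a single
 * MR, in which case the request is failed rather than split.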
935 */ 936 nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K); 937 if (nr < count) { 938 if (nr < 0) 939 return nr; 940 return -EINVAL; 941 } 942 943 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); 944 945 req->reg_cqe.done = nvme_rdma_memreg_done; 946 memset(&req->reg_wr, 0, sizeof(req->reg_wr)); 947 req->reg_wr.wr.opcode = IB_WR_REG_MR; 948 req->reg_wr.wr.wr_cqe = &req->reg_cqe; 949 req->reg_wr.wr.num_sge = 0; 950 req->reg_wr.mr = req->mr; 951 req->reg_wr.key = req->mr->rkey; 952 req->reg_wr.access = IB_ACCESS_LOCAL_WRITE | 953 IB_ACCESS_REMOTE_READ | 954 IB_ACCESS_REMOTE_WRITE; 955 956 req->mr->need_inval = true; 957 958 sg->addr = cpu_to_le64(req->mr->iova); 959 put_unaligned_le24(req->mr->length, sg->length); 960 put_unaligned_le32(req->mr->rkey, sg->key); 961 sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) | 962 NVME_SGL_FMT_INVALIDATE; 963 964 return 0; 965 } 966 967 static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, 968 struct request *rq, struct nvme_command *c) 969 { 970 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 971 struct nvme_rdma_device *dev = queue->device; 972 struct ib_device *ibdev = dev->dev; 973 int count, ret; 974 975 req->num_sge = 1; 976 req->inline_data = false; 977 req->mr->need_inval = false; 978 979 c->common.flags |= NVME_CMD_SGL_METABUF; 980 981 if (!blk_rq_bytes(rq)) 982 return nvme_rdma_set_sg_null(c); 983 984 req->sg_table.sgl = req->first_sgl; 985 ret = sg_alloc_table_chained(&req->sg_table, 986 blk_rq_nr_phys_segments(rq), req->sg_table.sgl); 987 if (ret) 988 return -ENOMEM; 989 990 req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl); 991 992 count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents, 993 rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 994 if (unlikely(count <= 0)) { 995 sg_free_table_chained(&req->sg_table, true); 996 return -EIO; 997 } 998 999 if (count == 1) { 1000 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && 1001 blk_rq_payload_bytes(rq) <= 1002 nvme_rdma_inline_data_size(queue)) 1003 return nvme_rdma_map_sg_inline(queue, req, c); 1004 1005 if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) 1006 return nvme_rdma_map_sg_single(queue, req, c); 1007 } 1008 1009 return nvme_rdma_map_sg_fr(queue, req, c, count); 1010 } 1011 1012 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) 1013 { 1014 if (unlikely(wc->status != IB_WC_SUCCESS)) 1015 nvme_rdma_wr_error(cq, wc, "SEND"); 1016 } 1017 1018 /* 1019 * We want to signal completion at least every queue depth/2. This returns the 1020 * largest power of two that is not above half of (queue size + 1) to optimize 1021 * (avoid divisions). 
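 * For example, with queue_size = 128 the limit is 64, so every 64th send
 * posted on the queue carries IB_SEND_SIGNALED.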
 */
static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
{
	int limit = 1 << ilog2((queue->queue_size + 1) / 2);

	return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
}

static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
		struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
		struct ib_send_wr *first, bool flush)
{
	struct ib_send_wr wr, *bad_wr;
	int ret;

	sge->addr   = qe->dma;
	sge->length = sizeof(struct nvme_command);
	sge->lkey   = queue->device->pd->local_dma_lkey;

	qe->cqe.done = nvme_rdma_send_done;

	wr.next       = NULL;
	wr.wr_cqe     = &qe->cqe;
	wr.sg_list    = sge;
	wr.num_sge    = num_sge;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = 0;

	/*
	 * Unsignalled send completions are another giant disaster in the
	 * IB Verbs spec: if we don't regularly post signalled sends
	 * the send queue will fill up and only a QP reset will rescue us.
	 * Would have been way too obvious to handle this in hardware or
	 * at least the RDMA stack..
	 *
	 * Always signal the flushes. The magic request used for the flush
	 * sequencer is not allocated in our driver's tagset and it's
	 * triggered to be freed by blk_cleanup_queue(). So we need to
	 * always mark it as signaled to ensure that the "wr_cqe", which is
	 * embedded in the request's payload, is not freed when
	 * __ib_process_cq() calls wr_cqe->done().
	 */
	if (nvme_rdma_queue_sig_limit(queue) || flush)
		wr.send_flags |= IB_SEND_SIGNALED;

	if (first)
		first->next = &wr;
	else
		first = &wr;

	ret = ib_post_send(queue->qp, first, &bad_wr);
	if (ret) {
		dev_err(queue->ctrl->ctrl.device,
			"%s failed with error code %d\n", __func__, ret);
	}
	return ret;
}

static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
		struct nvme_rdma_qe *qe)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;
	int ret;

	list.addr   = qe->dma;
	list.length = sizeof(struct nvme_completion);
	list.lkey   = queue->device->pd->local_dma_lkey;

	qe->cqe.done = nvme_rdma_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &qe->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(queue->qp, &wr, &bad_wr);
	if (ret) {
		dev_err(queue->ctrl->ctrl.device,
			"%s failed with error code %d\n", __func__, ret);
	}
	return ret;
}

static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
{
	u32 queue_idx = nvme_rdma_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
	struct nvme_rdma_queue *queue = &ctrl->queues[0];
	struct ib_device *dev = queue->device->dev;
	struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
	struct nvme_command *cmd = sqe->data;
	struct ib_sge sge;
	int ret;

	if (WARN_ON_ONCE(aer_idx != 0))
		return;

	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);

	memset(cmd, 0, sizeof(*cmd));
	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_RDMA_AQ_BLKMQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
nvme_rdma_set_sg_null(cmd); 1135 1136 ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), 1137 DMA_TO_DEVICE); 1138 1139 ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false); 1140 WARN_ON_ONCE(ret); 1141 } 1142 1143 static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, 1144 struct nvme_completion *cqe, struct ib_wc *wc, int tag) 1145 { 1146 struct request *rq; 1147 struct nvme_rdma_request *req; 1148 int ret = 0; 1149 1150 rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id); 1151 if (!rq) { 1152 dev_err(queue->ctrl->ctrl.device, 1153 "tag 0x%x on QP %#x not found\n", 1154 cqe->command_id, queue->qp->qp_num); 1155 nvme_rdma_error_recovery(queue->ctrl); 1156 return ret; 1157 } 1158 req = blk_mq_rq_to_pdu(rq); 1159 1160 if (rq->tag == tag) 1161 ret = 1; 1162 1163 if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) && 1164 wc->ex.invalidate_rkey == req->mr->rkey) 1165 req->mr->need_inval = false; 1166 1167 nvme_end_request(rq, cqe->status, cqe->result); 1168 return ret; 1169 } 1170 1171 static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag) 1172 { 1173 struct nvme_rdma_qe *qe = 1174 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); 1175 struct nvme_rdma_queue *queue = cq->cq_context; 1176 struct ib_device *ibdev = queue->device->dev; 1177 struct nvme_completion *cqe = qe->data; 1178 const size_t len = sizeof(struct nvme_completion); 1179 int ret = 0; 1180 1181 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1182 nvme_rdma_wr_error(cq, wc, "RECV"); 1183 return 0; 1184 } 1185 1186 ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE); 1187 /* 1188 * AEN requests are special as they don't time out and can 1189 * survive any kind of queue freeze and often don't respond to 1190 * aborts. We don't even bother to allocate a struct request 1191 * for them but rather special case them here. 
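 * nvme_rdma_submit_async_event() posts the AEN with command_id set to
 * NVME_RDMA_AQ_BLKMQ_DEPTH, so any admin completion carrying a command_id
 * at or above that value is routed to nvme_complete_async_event() below
 * instead of the blk-mq tagset.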
1192 */ 1193 if (unlikely(nvme_rdma_queue_idx(queue) == 0 && 1194 cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH)) 1195 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, 1196 &cqe->result); 1197 else 1198 ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag); 1199 ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE); 1200 1201 nvme_rdma_post_recv(queue, qe); 1202 return ret; 1203 } 1204 1205 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1206 { 1207 __nvme_rdma_recv_done(cq, wc, -1); 1208 } 1209 1210 static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) 1211 { 1212 int ret, i; 1213 1214 for (i = 0; i < queue->queue_size; i++) { 1215 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); 1216 if (ret) 1217 goto out_destroy_queue_ib; 1218 } 1219 1220 return 0; 1221 1222 out_destroy_queue_ib: 1223 nvme_rdma_destroy_queue_ib(queue); 1224 return ret; 1225 } 1226 1227 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, 1228 struct rdma_cm_event *ev) 1229 { 1230 struct rdma_cm_id *cm_id = queue->cm_id; 1231 int status = ev->status; 1232 const char *rej_msg; 1233 const struct nvme_rdma_cm_rej *rej_data; 1234 u8 rej_data_len; 1235 1236 rej_msg = rdma_reject_msg(cm_id, status); 1237 rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len); 1238 1239 if (rej_data && rej_data_len >= sizeof(u16)) { 1240 u16 sts = le16_to_cpu(rej_data->sts); 1241 1242 dev_err(queue->ctrl->ctrl.device, 1243 "Connect rejected: status %d (%s) nvme status %d (%s).\n", 1244 status, rej_msg, sts, nvme_rdma_cm_msg(sts)); 1245 } else { 1246 dev_err(queue->ctrl->ctrl.device, 1247 "Connect rejected: status %d (%s).\n", status, rej_msg); 1248 } 1249 1250 return -ECONNRESET; 1251 } 1252 1253 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue) 1254 { 1255 int ret; 1256 1257 ret = nvme_rdma_create_queue_ib(queue); 1258 if (ret) 1259 return ret; 1260 1261 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS); 1262 if (ret) { 1263 dev_err(queue->ctrl->ctrl.device, 1264 "rdma_resolve_route failed (%d).\n", 1265 queue->cm_error); 1266 goto out_destroy_queue; 1267 } 1268 1269 return 0; 1270 1271 out_destroy_queue: 1272 nvme_rdma_destroy_queue_ib(queue); 1273 return ret; 1274 } 1275 1276 static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) 1277 { 1278 struct nvme_rdma_ctrl *ctrl = queue->ctrl; 1279 struct rdma_conn_param param = { }; 1280 struct nvme_rdma_cm_req priv = { }; 1281 int ret; 1282 1283 param.qp_num = queue->qp->qp_num; 1284 param.flow_control = 1; 1285 1286 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom; 1287 /* maximum retry count */ 1288 param.retry_count = 7; 1289 param.rnr_retry_count = 7; 1290 param.private_data = &priv; 1291 param.private_data_len = sizeof(priv); 1292 1293 priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); 1294 priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue)); 1295 /* 1296 * set the admin queue depth to the minimum size 1297 * specified by the Fabrics standard. 1298 */ 1299 if (priv.qid == 0) { 1300 priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH); 1301 priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); 1302 } else { 1303 /* 1304 * current interpretation of the fabrics spec 1305 * is at minimum you make hrqsize sqsize+1, or a 1306 * 1's based representation of sqsize. 
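 * Accordingly, hrqsize is sent as the full queue_size while hsqsize
 * carries the 0's based ctrl->ctrl.sqsize (i.e. opts->queue_size - 1).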
 */
		priv.hrqsize = cpu_to_le16(queue->queue_size);
		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
	}

	ret = rdma_connect(queue->cm_id, &param);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"rdma_connect failed (%d).\n", ret);
		goto out_destroy_queue_ib;
	}

	return 0;

out_destroy_queue_ib:
	nvme_rdma_destroy_queue_ib(queue);
	return ret;
}

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *ev)
{
	struct nvme_rdma_queue *queue = cm_id->context;
	int cm_error = 0;

	dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
		rdma_event_msg(ev->event), ev->event,
		ev->status, cm_id);

	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		cm_error = nvme_rdma_addr_resolved(queue);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		cm_error = nvme_rdma_route_resolved(queue);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		queue->cm_error = nvme_rdma_conn_established(queue);
		/* complete cm_done regardless of success/failure */
		complete(&queue->cm_done);
		return 0;
	case RDMA_CM_EVENT_REJECTED:
		nvme_rdma_destroy_queue_ib(queue);
		cm_error = nvme_rdma_conn_rejected(queue, ev);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		nvme_rdma_destroy_queue_ib(queue);
		/* fall through */
	case RDMA_CM_EVENT_ADDR_ERROR:
		dev_dbg(queue->ctrl->ctrl.device,
			"CM error event %d\n", ev->event);
		cm_error = -ECONNRESET;
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		dev_dbg(queue->ctrl->ctrl.device,
			"disconnect received - connection closed\n");
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/* device removal is handled via the ib_client API */
		break;
	default:
		dev_err(queue->ctrl->ctrl.device,
			"Unexpected RDMA CM event (%d)\n", ev->event);
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	}

	if (cm_error) {
		queue->cm_error = cm_error;
		complete(&queue->cm_done);
	}

	return 0;
}

static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

	/* queue error recovery */
	nvme_rdma_error_recovery(req->queue->ctrl);

	/* fail with DNR on cmd timeout */
	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;

	return BLK_EH_HANDLED;
}

/*
 * We cannot accept any other command until the Connect command has completed.
 */
static inline blk_status_t
nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
{
	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
		struct nvme_command *cmd = nvme_req(rq)->cmd;

		if (!blk_rq_is_passthrough(rq) ||
		    cmd->common.opcode != nvme_fabrics_command ||
		    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
			/*
			 * The reconnecting state means transport disruption,
			 * which can take a long time and might even fail
			 * permanently, so we can't let incoming I/O be
			 * requeued forever.  Fail it fast to allow upper
			 * layers a chance to failover.
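			 * Anything else on a queue that is not yet live is
			 * returned as BLK_STS_RESOURCE so the block layer
			 * retries it once the queue connects.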
1418 */ 1419 if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING) 1420 return BLK_STS_IOERR; 1421 return BLK_STS_RESOURCE; /* try again later */ 1422 } 1423 } 1424 1425 return 0; 1426 } 1427 1428 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, 1429 const struct blk_mq_queue_data *bd) 1430 { 1431 struct nvme_ns *ns = hctx->queue->queuedata; 1432 struct nvme_rdma_queue *queue = hctx->driver_data; 1433 struct request *rq = bd->rq; 1434 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1435 struct nvme_rdma_qe *sqe = &req->sqe; 1436 struct nvme_command *c = sqe->data; 1437 bool flush = false; 1438 struct ib_device *dev; 1439 blk_status_t ret; 1440 int err; 1441 1442 WARN_ON_ONCE(rq->tag < 0); 1443 1444 ret = nvme_rdma_queue_is_ready(queue, rq); 1445 if (unlikely(ret)) 1446 return ret; 1447 1448 dev = queue->device->dev; 1449 ib_dma_sync_single_for_cpu(dev, sqe->dma, 1450 sizeof(struct nvme_command), DMA_TO_DEVICE); 1451 1452 ret = nvme_setup_cmd(ns, rq, c); 1453 if (ret) 1454 return ret; 1455 1456 blk_mq_start_request(rq); 1457 1458 err = nvme_rdma_map_data(queue, rq, c); 1459 if (err < 0) { 1460 dev_err(queue->ctrl->ctrl.device, 1461 "Failed to map data (%d)\n", err); 1462 nvme_cleanup_cmd(rq); 1463 goto err; 1464 } 1465 1466 ib_dma_sync_single_for_device(dev, sqe->dma, 1467 sizeof(struct nvme_command), DMA_TO_DEVICE); 1468 1469 if (req_op(rq) == REQ_OP_FLUSH) 1470 flush = true; 1471 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, 1472 req->mr->need_inval ? &req->reg_wr.wr : NULL, flush); 1473 if (err) { 1474 nvme_rdma_unmap_data(queue, rq); 1475 goto err; 1476 } 1477 1478 return BLK_STS_OK; 1479 err: 1480 if (err == -ENOMEM || err == -EAGAIN) 1481 return BLK_STS_RESOURCE; 1482 return BLK_STS_IOERR; 1483 } 1484 1485 static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) 1486 { 1487 struct nvme_rdma_queue *queue = hctx->driver_data; 1488 struct ib_cq *cq = queue->ib_cq; 1489 struct ib_wc wc; 1490 int found = 0; 1491 1492 while (ib_poll_cq(cq, 1, &wc) > 0) { 1493 struct ib_cqe *cqe = wc.wr_cqe; 1494 1495 if (cqe) { 1496 if (cqe->done == nvme_rdma_recv_done) 1497 found |= __nvme_rdma_recv_done(cq, &wc, tag); 1498 else 1499 cqe->done(cq, &wc); 1500 } 1501 } 1502 1503 return found; 1504 } 1505 1506 static void nvme_rdma_complete_rq(struct request *rq) 1507 { 1508 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1509 1510 nvme_rdma_unmap_data(req->queue, rq); 1511 nvme_complete_rq(rq); 1512 } 1513 1514 static int nvme_rdma_map_queues(struct blk_mq_tag_set *set) 1515 { 1516 struct nvme_rdma_ctrl *ctrl = set->driver_data; 1517 1518 return blk_mq_rdma_map_queues(set, ctrl->device->dev, 0); 1519 } 1520 1521 static const struct blk_mq_ops nvme_rdma_mq_ops = { 1522 .queue_rq = nvme_rdma_queue_rq, 1523 .complete = nvme_rdma_complete_rq, 1524 .init_request = nvme_rdma_init_request, 1525 .exit_request = nvme_rdma_exit_request, 1526 .init_hctx = nvme_rdma_init_hctx, 1527 .poll = nvme_rdma_poll, 1528 .timeout = nvme_rdma_timeout, 1529 .map_queues = nvme_rdma_map_queues, 1530 }; 1531 1532 static const struct blk_mq_ops nvme_rdma_admin_mq_ops = { 1533 .queue_rq = nvme_rdma_queue_rq, 1534 .complete = nvme_rdma_complete_rq, 1535 .init_request = nvme_rdma_init_request, 1536 .exit_request = nvme_rdma_exit_request, 1537 .init_hctx = nvme_rdma_init_admin_hctx, 1538 .timeout = nvme_rdma_timeout, 1539 }; 1540 1541 static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) 1542 { 1543 int error; 1544 1545 error = nvme_rdma_init_queue(ctrl, 0, 
NVME_AQ_DEPTH); 1546 if (error) 1547 return error; 1548 1549 ctrl->device = ctrl->queues[0].device; 1550 1551 /* 1552 * We need a reference on the device as long as the tag_set is alive, 1553 * as the MRs in the request structures need a valid ib_device. 1554 */ 1555 error = -EINVAL; 1556 if (!nvme_rdma_dev_get(ctrl->device)) 1557 goto out_free_queue; 1558 1559 ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS, 1560 ctrl->device->dev->attrs.max_fast_reg_page_list_len); 1561 1562 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); 1563 ctrl->admin_tag_set.ops = &nvme_rdma_admin_mq_ops; 1564 ctrl->admin_tag_set.queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH; 1565 ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */ 1566 ctrl->admin_tag_set.numa_node = NUMA_NO_NODE; 1567 ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_rdma_request) + 1568 SG_CHUNK_SIZE * sizeof(struct scatterlist); 1569 ctrl->admin_tag_set.driver_data = ctrl; 1570 ctrl->admin_tag_set.nr_hw_queues = 1; 1571 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; 1572 1573 error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); 1574 if (error) 1575 goto out_put_dev; 1576 1577 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); 1578 if (IS_ERR(ctrl->ctrl.admin_q)) { 1579 error = PTR_ERR(ctrl->ctrl.admin_q); 1580 goto out_free_tagset; 1581 } 1582 1583 error = nvmf_connect_admin_queue(&ctrl->ctrl); 1584 if (error) 1585 goto out_cleanup_queue; 1586 1587 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); 1588 1589 error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, 1590 &ctrl->ctrl.cap); 1591 if (error) { 1592 dev_err(ctrl->ctrl.device, 1593 "prop_get NVME_REG_CAP failed\n"); 1594 goto out_cleanup_queue; 1595 } 1596 1597 ctrl->ctrl.sqsize = 1598 min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize); 1599 1600 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); 1601 if (error) 1602 goto out_cleanup_queue; 1603 1604 ctrl->ctrl.max_hw_sectors = 1605 (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9); 1606 1607 error = nvme_init_identify(&ctrl->ctrl); 1608 if (error) 1609 goto out_cleanup_queue; 1610 1611 error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev, 1612 &ctrl->async_event_sqe, sizeof(struct nvme_command), 1613 DMA_TO_DEVICE); 1614 if (error) 1615 goto out_cleanup_queue; 1616 1617 return 0; 1618 1619 out_cleanup_queue: 1620 blk_cleanup_queue(ctrl->ctrl.admin_q); 1621 out_free_tagset: 1622 /* disconnect and drain the queue before freeing the tagset */ 1623 nvme_rdma_stop_queue(&ctrl->queues[0]); 1624 blk_mq_free_tag_set(&ctrl->admin_tag_set); 1625 out_put_dev: 1626 nvme_rdma_dev_put(ctrl->device); 1627 out_free_queue: 1628 nvme_rdma_free_queue(&ctrl->queues[0]); 1629 return error; 1630 } 1631 1632 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl) 1633 { 1634 cancel_work_sync(&ctrl->err_work); 1635 cancel_delayed_work_sync(&ctrl->reconnect_work); 1636 1637 if (ctrl->ctrl.queue_count > 1) { 1638 nvme_stop_queues(&ctrl->ctrl); 1639 blk_mq_tagset_busy_iter(&ctrl->tag_set, 1640 nvme_cancel_request, &ctrl->ctrl); 1641 nvme_rdma_free_io_queues(ctrl); 1642 } 1643 1644 if (test_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags)) 1645 nvme_shutdown_ctrl(&ctrl->ctrl); 1646 1647 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); 1648 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 1649 nvme_cancel_request, &ctrl->ctrl); 1650 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 1651 nvme_rdma_destroy_admin_queue(ctrl); 1652 } 1653 1654 static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) 1655 { 1656 
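	/*
	 * Stop the controller and remove its namespaces before optionally
	 * shutting the transport down and releasing the tag set resources.
	 */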
nvme_stop_ctrl(&ctrl->ctrl); 1657 nvme_remove_namespaces(&ctrl->ctrl); 1658 if (shutdown) 1659 nvme_rdma_shutdown_ctrl(ctrl); 1660 1661 nvme_uninit_ctrl(&ctrl->ctrl); 1662 if (ctrl->ctrl.tagset) { 1663 blk_cleanup_queue(ctrl->ctrl.connect_q); 1664 blk_mq_free_tag_set(&ctrl->tag_set); 1665 nvme_rdma_dev_put(ctrl->device); 1666 } 1667 1668 nvme_put_ctrl(&ctrl->ctrl); 1669 } 1670 1671 static void nvme_rdma_del_ctrl_work(struct work_struct *work) 1672 { 1673 struct nvme_rdma_ctrl *ctrl = container_of(work, 1674 struct nvme_rdma_ctrl, delete_work); 1675 1676 __nvme_rdma_remove_ctrl(ctrl, true); 1677 } 1678 1679 static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl) 1680 { 1681 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) 1682 return -EBUSY; 1683 1684 if (!queue_work(nvme_wq, &ctrl->delete_work)) 1685 return -EBUSY; 1686 1687 return 0; 1688 } 1689 1690 static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl) 1691 { 1692 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); 1693 int ret = 0; 1694 1695 /* 1696 * Keep a reference until all work is flushed since 1697 * __nvme_rdma_del_ctrl can free the ctrl mem 1698 */ 1699 if (!kref_get_unless_zero(&ctrl->ctrl.kref)) 1700 return -EBUSY; 1701 ret = __nvme_rdma_del_ctrl(ctrl); 1702 if (!ret) 1703 flush_work(&ctrl->delete_work); 1704 nvme_put_ctrl(&ctrl->ctrl); 1705 return ret; 1706 } 1707 1708 static void nvme_rdma_remove_ctrl_work(struct work_struct *work) 1709 { 1710 struct nvme_rdma_ctrl *ctrl = container_of(work, 1711 struct nvme_rdma_ctrl, delete_work); 1712 1713 __nvme_rdma_remove_ctrl(ctrl, false); 1714 } 1715 1716 static void nvme_rdma_reset_ctrl_work(struct work_struct *work) 1717 { 1718 struct nvme_rdma_ctrl *ctrl = 1719 container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work); 1720 int ret; 1721 bool changed; 1722 1723 nvme_stop_ctrl(&ctrl->ctrl); 1724 nvme_rdma_shutdown_ctrl(ctrl); 1725 1726 ret = nvme_rdma_configure_admin_queue(ctrl); 1727 if (ret) { 1728 /* ctrl is already shutdown, just remove the ctrl */ 1729 INIT_WORK(&ctrl->delete_work, nvme_rdma_remove_ctrl_work); 1730 goto del_dead_ctrl; 1731 } 1732 1733 if (ctrl->ctrl.queue_count > 1) { 1734 ret = blk_mq_reinit_tagset(&ctrl->tag_set, 1735 nvme_rdma_reinit_request); 1736 if (ret) 1737 goto del_dead_ctrl; 1738 1739 ret = nvme_rdma_init_io_queues(ctrl); 1740 if (ret) 1741 goto del_dead_ctrl; 1742 1743 ret = nvme_rdma_connect_io_queues(ctrl); 1744 if (ret) 1745 goto del_dead_ctrl; 1746 1747 blk_mq_update_nr_hw_queues(&ctrl->tag_set, 1748 ctrl->ctrl.queue_count - 1); 1749 } 1750 1751 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 1752 WARN_ON_ONCE(!changed); 1753 1754 nvme_start_ctrl(&ctrl->ctrl); 1755 1756 return; 1757 1758 del_dead_ctrl: 1759 /* Deleting this dead controller... 
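 * the queued delete work below performs the remaining teardown and frees
 * the controller.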
*/ 1760 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); 1761 WARN_ON(!queue_work(nvme_wq, &ctrl->delete_work)); 1762 } 1763 1764 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { 1765 .name = "rdma", 1766 .module = THIS_MODULE, 1767 .flags = NVME_F_FABRICS, 1768 .reg_read32 = nvmf_reg_read32, 1769 .reg_read64 = nvmf_reg_read64, 1770 .reg_write32 = nvmf_reg_write32, 1771 .free_ctrl = nvme_rdma_free_ctrl, 1772 .submit_async_event = nvme_rdma_submit_async_event, 1773 .delete_ctrl = nvme_rdma_del_ctrl, 1774 .get_address = nvmf_get_address, 1775 }; 1776 1777 static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) 1778 { 1779 int ret; 1780 1781 ret = nvme_rdma_init_io_queues(ctrl); 1782 if (ret) 1783 return ret; 1784 1785 /* 1786 * We need a reference on the device as long as the tag_set is alive, 1787 * as the MRs in the request structures need a valid ib_device. 1788 */ 1789 ret = -EINVAL; 1790 if (!nvme_rdma_dev_get(ctrl->device)) 1791 goto out_free_io_queues; 1792 1793 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 1794 ctrl->tag_set.ops = &nvme_rdma_mq_ops; 1795 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; 1796 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ 1797 ctrl->tag_set.numa_node = NUMA_NO_NODE; 1798 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 1799 ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) + 1800 SG_CHUNK_SIZE * sizeof(struct scatterlist); 1801 ctrl->tag_set.driver_data = ctrl; 1802 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; 1803 ctrl->tag_set.timeout = NVME_IO_TIMEOUT; 1804 1805 ret = blk_mq_alloc_tag_set(&ctrl->tag_set); 1806 if (ret) 1807 goto out_put_dev; 1808 ctrl->ctrl.tagset = &ctrl->tag_set; 1809 1810 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); 1811 if (IS_ERR(ctrl->ctrl.connect_q)) { 1812 ret = PTR_ERR(ctrl->ctrl.connect_q); 1813 goto out_free_tag_set; 1814 } 1815 1816 ret = nvme_rdma_connect_io_queues(ctrl); 1817 if (ret) 1818 goto out_cleanup_connect_q; 1819 1820 return 0; 1821 1822 out_cleanup_connect_q: 1823 blk_cleanup_queue(ctrl->ctrl.connect_q); 1824 out_free_tag_set: 1825 blk_mq_free_tag_set(&ctrl->tag_set); 1826 out_put_dev: 1827 nvme_rdma_dev_put(ctrl->device); 1828 out_free_io_queues: 1829 nvme_rdma_free_io_queues(ctrl); 1830 return ret; 1831 } 1832 1833 static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, 1834 struct nvmf_ctrl_options *opts) 1835 { 1836 struct nvme_rdma_ctrl *ctrl; 1837 int ret; 1838 bool changed; 1839 char *port; 1840 1841 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 1842 if (!ctrl) 1843 return ERR_PTR(-ENOMEM); 1844 ctrl->ctrl.opts = opts; 1845 INIT_LIST_HEAD(&ctrl->list); 1846 1847 if (opts->mask & NVMF_OPT_TRSVCID) 1848 port = opts->trsvcid; 1849 else 1850 port = __stringify(NVME_RDMA_IP_PORT); 1851 1852 ret = inet_pton_with_scope(&init_net, AF_UNSPEC, 1853 opts->traddr, port, &ctrl->addr); 1854 if (ret) { 1855 pr_err("malformed address passed: %s:%s\n", opts->traddr, port); 1856 goto out_free_ctrl; 1857 } 1858 1859 if (opts->mask & NVMF_OPT_HOST_TRADDR) { 1860 ret = inet_pton_with_scope(&init_net, AF_UNSPEC, 1861 opts->host_traddr, NULL, &ctrl->src_addr); 1862 if (ret) { 1863 pr_err("malformed src address passed: %s\n", 1864 opts->host_traddr); 1865 goto out_free_ctrl; 1866 } 1867 } 1868 1869 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, 1870 0 /* no quirks, we're perfect! 
*/);
	if (ret)
		goto out_free_ctrl;

	INIT_DELAYED_WORK(&ctrl->reconnect_work,
			nvme_rdma_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
	INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);

	ctrl->ctrl.queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_rdma_configure_admin_queue(ctrl);
	if (ret)
		goto out_kfree_queues;

	/* sanity check icdoff */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
		ret = -EINVAL;
		goto out_remove_admin_queue;
	}

	/* sanity check keyed sgls */
	if (!(ctrl->ctrl.sgls & (1 << 20))) {
		dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not supported\n");
		ret = -EINVAL;
		goto out_remove_admin_queue;
	}

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
		opts->queue_size = ctrl->ctrl.sqsize + 1;
	}

	if (opts->nr_io_queues) {
		ret = nvme_rdma_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	kref_get(&ctrl->ctrl.kref);

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_rdma_destroy_admin_queue(ctrl);
out_kfree_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_rdma_transport = {
	.name		= "rdma",
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_rdma_create_ctrl,
};

static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvme_rdma_ctrl *ctrl;

	/* Delete all controllers using this device */
	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		if (ctrl->device->dev != ib_device)
			continue;
		dev_info(ctrl->ctrl.device,
			"Removing ctrl: NQN \"%s\", addr %pISp\n",
			ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
		__nvme_rdma_del_ctrl(ctrl);
	}
mutex_unlock(&nvme_rdma_ctrl_mutex); 1984 1985 flush_workqueue(nvme_wq); 1986 } 1987 1988 static struct ib_client nvme_rdma_ib_client = { 1989 .name = "nvme_rdma", 1990 .remove = nvme_rdma_remove_one 1991 }; 1992 1993 static int __init nvme_rdma_init_module(void) 1994 { 1995 int ret; 1996 1997 ret = ib_register_client(&nvme_rdma_ib_client); 1998 if (ret) 1999 return ret; 2000 2001 ret = nvmf_register_transport(&nvme_rdma_transport); 2002 if (ret) 2003 goto err_unreg_client; 2004 2005 return 0; 2006 2007 err_unreg_client: 2008 ib_unregister_client(&nvme_rdma_ib_client); 2009 return ret; 2010 } 2011 2012 static void __exit nvme_rdma_cleanup_module(void) 2013 { 2014 nvmf_unregister_transport(&nvme_rdma_transport); 2015 ib_unregister_client(&nvme_rdma_ib_client); 2016 } 2017 2018 module_init(nvme_rdma_init_module); 2019 module_exit(nvme_rdma_cleanup_module); 2020 2021 MODULE_LICENSE("GPL v2"); 2022