// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/blk-integrity.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <linux/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
#include <rdma/ib_cm.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)

/* Assume mpsmin == device_page_size == 4KB */
#define NVMET_RDMA_MAX_MDTS			8
#define NVMET_RDMA_MAX_METADATA_MDTS		5

#define NVMET_RDMA_BACKLOG	128

#define NVMET_RDMA_DISCRETE_RSP_TAG	-1

struct nvmet_rdma_srq;

struct nvmet_rdma_cmd {
	struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
	struct nvmet_rdma_srq *nsrq;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
};

struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct ib_cqe write_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	bool allocated;
	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	int tag;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id *cm_id;
	struct ib_qp *qp;
	struct nvmet_port *port;
	struct ib_cq *cq;
	atomic_t sq_wr_avail;
	struct nvmet_rdma_device *dev;
	struct nvmet_rdma_srq *nsrq;
	spinlock_t state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;

	struct nvmet_rdma_rsp *rsps;
	struct sbitmap rsp_tags;
	struct nvmet_rdma_cmd *cmds;

	struct work_struct release_work;
	struct list_head rsp_wait_list;
	struct list_head rsp_wr_wait_list;
	spinlock_t rsp_wr_wait_lock;

	int idx;
	int host_qid;
	int comp_vector;
	int recv_queue_size;
	int send_queue_size;

	struct list_head queue_list;
};

struct nvmet_rdma_port {
	struct nvmet_port *nport;
	struct sockaddr_storage addr;
	struct rdma_cm_id *cm_id;
	struct delayed_work repair_work;
};

struct nvmet_rdma_srq {
	struct ib_srq *srq;
	struct nvmet_rdma_cmd *cmds;
	struct nvmet_rdma_device *ndev;
};

struct nvmet_rdma_device {
	struct ib_device *device;
	struct ib_pd *pd;
	struct nvmet_rdma_srq **srqs;
	int srq_count;
	size_t srq_size;
	struct kref ref;
	struct list_head entry;
	int inline_data_size;
	int inline_page_count;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
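
/*
 * An SRQ (shared receive queue) lets multiple target queues share one
 * pool of RECV buffers.  srq_size below is validated by srq_size_set():
 * values smaller than 256 entries are rejected.
 */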
static int srq_size_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops srq_size_ops = {
	.set = srq_size_set,
	.get = param_get_int,
};

static int nvmet_rdma_srq_size = 1024;
module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should be >= 256 (default: 1024)");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r,
		int tag);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

static int srq_size_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 256)
		return -EINVAL;

	return param_set_int(val, kp);
}

static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.cqe->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp = NULL;
	int tag;

	tag = sbitmap_get(&queue->rsp_tags);
	if (tag >= 0)
		rsp = &queue->rsps[tag];

	if (unlikely(!rsp)) {
		int ret;

		rsp = kzalloc_obj(*rsp);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp,
				NVMET_RDMA_DISCRETE_RSP_TAG);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	if (unlikely(rsp->tag == NVMET_RDMA_DISCRETE_RSP_TAG)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag);
}

static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	int i;

	if (!ndev->inline_data_size)
		return;

	sg = c->inline_sg;
	sge = &c->sge[1];

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
}

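/*
 * Allocate and DMA-map one page per inline SGE for in-capsule data.
 * sge[0] always carries the NVMe command itself, so the inline data
 * pages are described by sge[1..inline_page_count]; only the last page
 * may be partially used.
 */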
static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	struct page *pg;
	int len;
	int i;

	if (!ndev->inline_data_size)
		return 0;

	sg = c->inline_sg;
	sg_init_table(sg, ndev->inline_page_count);
	sge = &c->sge[1];
	len = ndev->inline_data_size;

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg)
			goto out_err;
		sg_assign_page(sg, pg);
		sge->addr = ib_dma_map_page(ndev->device,
				pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, sge->addr))
			goto out_err;
		sge->length = min_t(int, len, PAGE_SIZE);
		sge->lkey = ndev->pd->local_dma_lkey;
		len -= sge->length;
	}

	return 0;
out_err:
	for (; i >= 0; i--, sg--, sge--) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
	return -ENOMEM;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc_obj(*c->nvme_cmd);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
		goto out_unmap_cmd;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;

	return 0;

out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin)
		nvmet_rdma_free_inline_pages(ndev, c);
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kvzalloc_objs(struct nvmet_rdma_cmd, nr_cmds);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kvfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kvfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r, int tag)
{
	/* NVMe CQE / RDMA SEND */
	r->req.cqe = kmalloc_obj(*r->req.cqe);
	if (!r->req.cqe)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	if (ib_dma_pci_p2p_dma_supported(ndev->device))
		r->req.p2p_client = &ndev->device->dev;
	r->send_sge.length = sizeof(*r->req.cqe);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	/* Data Out / RDMA WRITE */
	r->write_cqe.done = nvmet_rdma_write_data_done;
	r->tag = tag;

	return 0;

out_free_rsp:
	kfree(r->req.cqe);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	kfree(r->req.cqe);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -ENOMEM, i;

	if (sbitmap_init_node(&queue->rsp_tags, nr_rsps, -1, GFP_KERNEL,
			NUMA_NO_NODE, false, true))
		goto out;

	queue->rsps = kvzalloc_objs(struct nvmet_rdma_rsp, nr_rsps);
	if (!queue->rsps)
		goto out_free_sbitmap;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp, i);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
	kvfree(queue->rsps);
out_free_sbitmap:
	sbitmap_free(&queue->rsp_tags);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++)
		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
	kvfree(queue->rsps);
	sbitmap_free(&queue->rsp_tags);
}

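/*
 * (Re-)post a command buffer for RECV, either on the shared SRQ it was
 * allocated for or on the per-queue receive queue.
 */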
static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	int ret;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (cmd->nsrq)
		ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
	else
		ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;
	u16 status = 0;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		return NVME_SC_INVALID_PI;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			status = NVME_SC_GUARD_CHECK;
			break;
		case IB_SIG_BAD_REFTAG:
			status = NVME_SC_REFTAG_CHECK;
			break;
		case IB_SIG_BAD_APPTAG:
			status = NVME_SC_APPTAG_CHECK;
			break;
		}
		pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
	}

	return status;
}

static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
		struct nvme_command *cmd, struct ib_sig_domain *domain,
		u16 control, u8 pi_type)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = 1 << bi->interval_exp;
	domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
	if (control & NVME_RW_PRINFO_PRCHK_REF)
		domain->sig.dif.ref_remap = true;

	domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat);
	domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm);
	domain->sig.dif.app_escape = true;
	if (pi_type == NVME_NS_DPS_PI_TYPE3)
		domain->sig.dif.ref_escape = true;
}

static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
		struct ib_sig_attrs *sig_attrs)
{
	struct nvme_command *cmd = req->cmd;
	u16 control = le16_to_cpu(cmd->rw.control);
	u8 pi_type = req->ns->pi_type;
	struct blk_integrity *bi;

	bi = bdev_get_integrity(req->ns->bdev);

	memset(sig_attrs, 0, sizeof(*sig_attrs));

	if (control & NVME_RW_PRINFO_PRACT) {
		/* for WRITE_INSERT/READ_STRIP no wire domain */
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
					  pi_type);
		/* Clear the PRACT bit since HCA will generate/verify the PI */
		control &= ~NVME_RW_PRINFO_PRACT;
		cmd->rw.control = cpu_to_le16(control);
		/* PI is added by the HW */
		req->transfer_len += req->metadata_len;
	} else {
		/* for WRITE_PASS/READ_PASS both wire/memory domains exist */
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
					  pi_type);
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
					  pi_type);
	}

	if (control & NVME_RW_PRINFO_PRCHK_REF)
		sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
	if (control & NVME_RW_PRINFO_PRCHK_GUARD)
		sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
	if (control & NVME_RW_PRINFO_PRCHK_APP)
		sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
}

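/*
 * Set up the rdma_rw context for the host-memory side of this request.
 * When metadata (T10-PI) is present the signature variant is used so
 * the HCA can generate/verify PI during the data transfer.
 */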
static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
		struct ib_sig_attrs *sig_attrs)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct nvmet_req *req = &rsp->req;
	int ret;

	if (req->metadata_len)
		ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
			cm_id->port_num, req->sg, req->sg_cnt,
			req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
			addr, key, nvmet_data_dir(req));
	else
		ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
				req->sg, req->sg_cnt, 0, addr, key,
				nvmet_data_dir(req));

	return ret;
}

static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct nvmet_req *req = &rsp->req;

	if (req->metadata_len)
		rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
			cm_id->port_num, req->sg, req->sg_cnt,
			req->metadata_sg, req->metadata_sg_cnt,
			nvmet_data_dir(req));
	else
		rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
				req->sg, req->sg_cnt, nvmet_data_dir(req));
}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma)
		nvmet_rdma_rw_ctx_destroy(rsp);

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgls(&rsp->req);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * We didn't set up the controller yet in case of an admin
		 * connect error, so just disconnect and clean up the queue.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}

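/*
 * Transmit the NVMe completion.  If the command used a remote key that
 * should be invalidated, SEND_WITH_INV is used.  For Data-Out the RDMA
 * WRITE work requests are posted first; without metadata they are
 * chained directly in front of the response SEND.
 */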
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->invalidate_rkey) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp)) {
		if (rsp->req.metadata_len)
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, &rsp->write_cqe, NULL);
		else
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, NULL, &rsp->send_wr);
	} else {
		first_wr = &rsp->send_wr;
	}

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	u16 status = 0;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (rsp->req.metadata_len)
		status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(status))
		nvmet_req_complete(&rsp->req, status);
	else
		rsp->req.execute(&rsp->req);
}

static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u16 status;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA WRITE for CQE failed with status %s (%d).\n",
				ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	/*
	 * Upon RDMA completion check the signature status
	 * - if succeeded send good NVMe response
	 * - if failed send bad NVMe response with appropriate error
	 */
	status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	if (unlikely(status))
		rsp->req.cqe->status = cpu_to_le16(status << 1);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

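/*
 * Point req->sg at the command's pre-allocated inline data pages.
 * 'off' is the byte offset of the data within the inline buffer and
 * only applies to the first page.
 */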
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	int sg_count = num_pages(len);
	struct scatterlist *sg;
	int i;

	sg = rsp->cmd->inline_sg;
	for (i = 0; i < sg_count; i++, sg++) {
		if (i < sg_count - 1)
			sg_unmark_end(sg);
		else
			sg_mark_end(sg);
		sg->offset = off;
		sg->length = min_t(int, len, PAGE_SIZE - off);
		len -= sg->length;
		if (!i)
			off = 0;
	}

	rsp->req.sg = rsp->cmd->inline_sg;
	rsp->req.sg_cnt = sg_count;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd)) {
		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	if (off + len > rsp->queue->dev->inline_data_size) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	struct ib_sig_attrs sig_attrs;
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? */
	if (!rsp->req.transfer_len)
		return 0;

	if (rsp->req.metadata_len)
		nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);

	ret = nvmet_req_alloc_sgls(&rsp->req);
	if (unlikely(ret < 0))
		goto error_out;

	ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
	if (unlikely(ret < 0))
		goto error_out;
	rsp->n_rdma += ret;

	if (invalidate)
		rsp->invalidate_rkey = key;

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
	}
}

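/*
 * Try to execute the command.  Send queue slots for the response (and
 * any RDMA READ contexts) are reserved up front; if the send queue is
 * full, false is returned and the caller parks the request on
 * rsp_wr_wait_list until completions free up slots.
 */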
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static bool nvmet_rdma_recv_not_live(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&queue->state_lock, flags);
	/*
	 * Re-check that the queue state is not LIVE to prevent a race
	 * with the RDMA_CM_EVENT_ESTABLISHED handler.
	 */
	if (queue->state == NVMET_RDMA_Q_LIVE)
		ret = false;
	else if (queue->state == NVMET_RDMA_Q_CONNECTING)
		list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
	else
		nvmet_rdma_put_rsp(rsp);
	spin_unlock_irqrestore(&queue->state_lock, flags);
	return ret;
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * We only get here under memory pressure: silently drop
		 * the command and let the host retry, since we cannot
		 * even fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;
	rsp->invalidate_rkey = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE) &&
	    nvmet_rdma_recv_not_live(queue, rsp))
		return;

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
{
	nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
			     false);
	ib_destroy_srq(nsrq->srq);

	kfree(nsrq);
}

static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
{
	int i;

	if (!ndev->srqs)
		return;

	for (i = 0; i < ndev->srq_count; i++)
		nvmet_rdma_destroy_srq(ndev->srqs[i]);

	kfree(ndev->srqs);
}

static struct nvmet_rdma_srq *
nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	size_t srq_size = ndev->srq_size;
	struct nvmet_rdma_srq *nsrq;
	struct ib_srq *srq;
	int ret, i;

	nsrq = kzalloc_obj(*nsrq);
	if (!nsrq)
		return ERR_PTR(-ENOMEM);

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto out_free;
	}

	nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(nsrq->cmds)) {
		ret = PTR_ERR(nsrq->cmds);
		goto out_destroy_srq;
	}

	nsrq->srq = srq;
	nsrq->ndev = ndev;

	for (i = 0; i < srq_size; i++) {
		nsrq->cmds[i].nsrq = nsrq;
		ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
		if (ret)
			goto out_free_cmds;
	}

	return nsrq;

out_free_cmds:
	nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
out_destroy_srq:
	ib_destroy_srq(srq);
out_free:
	kfree(nsrq);
	return ERR_PTR(ret);
}

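/*
 * Create one SRQ per completion vector (bounded by the device's SRQ
 * limits) so that queues handled on the same vector can share RECV
 * buffers.
 */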
static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
{
	int i, ret;

	if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
			     nvmet_rdma_srq_size);
	ndev->srq_count = min(ndev->device->num_comp_vectors,
			      ndev->device->attrs.max_srq);

	ndev->srqs = kzalloc_objs(*ndev->srqs, ndev->srq_count);
	if (!ndev->srqs)
		return -ENOMEM;

	for (i = 0; i < ndev->srq_count; i++) {
		ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
		if (IS_ERR(ndev->srqs[i])) {
			ret = PTR_ERR(ndev->srqs[i]);
			goto err_srq;
		}
	}

	return 0;

err_srq:
	while (--i >= 0)
		nvmet_rdma_destroy_srq(ndev->srqs[i]);
	kfree(ndev->srqs);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srqs(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_port *port = cm_id->context;
	struct nvmet_port *nport = port->nport;
	struct nvmet_rdma_device *ndev;
	int inline_page_count;
	int inline_sge_count;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc_obj(*ndev);
	if (!ndev)
		goto out_err;

	inline_page_count = num_pages(nport->inline_data_size);
	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
				cm_id->device->attrs.max_recv_sge) - 1;
	if (inline_page_count > inline_sge_count) {
		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
			nport->inline_data_size, cm_id->device->name,
			inline_sge_count * PAGE_SIZE);
		nport->inline_data_size = inline_sge_count * PAGE_SIZE;
		inline_page_count = inline_sge_count;
	}
	ndev->inline_data_size = nport->inline_data_size;
	ndev->inline_page_count = inline_page_count;

	if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags &
				  IBK_INTEGRITY_HANDOVER)) {
		pr_warn("T10-PI is not supported by device %s. Disabling it\n",
			cm_id->device->name);
		nport->pi_enable = false;
	}

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srqs(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

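/*
 * Create the CQ and RC QP for a queue.  A single CQ serves both send
 * and receive completions; CQ and WR counts include one extra entry
 * for ib_drain_qp().
 */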
static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr = { };
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_cqe, ret, i, factor;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
				   queue->comp_vector, IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
				   1 << NVMET_RDMA_MAX_MDTS);
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_send_sge);

	if (queue->nsrq) {
		qp_attr.srq = queue->nsrq->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
	}

	if (queue->port->pi_enable && queue->host_qid)
		qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}
	queue->qp = queue->cm_id->qp;

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!queue->nsrq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
			if (ret)
				goto err_destroy_qp;
		}
	}

out:
	return ret;

err_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
	ib_cq_pool_put(queue->cq, nr_cqe + 1);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->qp);
	if (queue->cm_id)
		rdma_destroy_id(queue->cm_id);
	ib_destroy_qp(queue->qp);
	ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 *
		       queue->send_queue_size + 1);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_debug("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);
	nvmet_cq_put(&queue->nvme_cq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->nsrq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_free(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct nvmet_rdma_device *dev = queue->dev;

	nvmet_rdma_free_queue(queue);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

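/*
 * Parse the NVMe/RDMA CM connect request private data and derive the
 * queue ID and queue sizes from it.
 */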
static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
				enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
			   IB_CM_REJ_CONSUMER_DEFINED);
}

static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_port *port = cm_id->context;
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc_obj(*queue);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	nvmet_cq_init(&queue->nvme_cq);
	ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;
	queue->port = port->nport;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	queue->comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

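	/*
	 * Response contexts (2 * recv_queue_size, tracked by an sbitmap)
	 * are allocated before the RECV commands and the QP itself.
	 */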
	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (ndev->srqs) {
		queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count];
	} else {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!queue->nsrq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_free(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	nvmet_cq_put(&queue->nvme_cq);
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
{
	struct nvmet_rdma_queue *queue = priv;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(queue->cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_debug("received last WQE reached event for queue=0x%p\n",
			 queue);
		break;
	default:
		pr_err("received IB QP event: %s (%d)\n",
		       ib_event_msg(event->event), event->event);
		break;
	}
}

static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
{
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_rep priv = { };
	int ret = -ENOMEM;

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, p->initiator_depth,
		queue->dev->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(queue->recv_queue_size);

	ret = rdma_accept(cm_id, &param);
	if (ret)
		pr_err("rdma_accept failed (error code = %d)\n", ret);

	return ret;
}

static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_device *ndev;
	struct nvmet_rdma_queue *queue;
	int ret = -EINVAL;

	ndev = nvmet_rdma_find_get_device(cm_id);
	if (!ndev) {
		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
		return -ECONNREFUSED;
	}

	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
	if (!queue) {
		ret = -ENOMEM;
		goto put_device;
	}

	if (queue->host_qid == 0) {
		struct nvmet_rdma_queue *q;
		int pending = 0;

		/* Check for pending controller teardown */
		mutex_lock(&nvmet_rdma_queue_mutex);
		list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) {
			if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl &&
			    q->state == NVMET_RDMA_Q_DISCONNECTING)
				pending++;
		}
		mutex_unlock(&nvmet_rdma_queue_mutex);
		if (pending > NVMET_RDMA_BACKLOG)
			return NVME_SC_CONNECT_CTRL_BUSY;
	}

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret) {
		/*
		 * Don't destroy the cm_id in free path, as we implicitly
		 * destroy the cm_id here with non-zero ret code.
		 */
		queue->cm_id = NULL;
		goto free_queue;
	}

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	return 0;

free_queue:
	nvmet_rdma_free_queue(queue);
put_device:
	kref_put(&ndev->ref, nvmet_rdma_free_dev);

	return ret;
}

static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
		pr_warn("trying to establish a connected queue\n");
		goto out_unlock;
	}
	queue->state = NVMET_RDMA_Q_LIVE;

	while (!list_empty(&queue->rsp_wait_list)) {
		struct nvmet_rdma_rsp *cmd;

		cmd = list_first_entry(&queue->rsp_wait_list,
					struct nvmet_rdma_rsp, wait_list);
		list_del(&cmd->wait_list);

		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_handle_command(queue, cmd);
		spin_lock_irqsave(&queue->state_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&queue->state_lock, flags);
}

static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;
	unsigned long flags;

	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);

	spin_lock_irqsave(&queue->state_lock, flags);
	switch (queue->state) {
	case NVMET_RDMA_Q_CONNECTING:
		while (!list_empty(&queue->rsp_wait_list)) {
			struct nvmet_rdma_rsp *rsp;

			rsp = list_first_entry(&queue->rsp_wait_list,
					       struct nvmet_rdma_rsp,
					       wait_list);
			list_del(&rsp->wait_list);
			nvmet_rdma_put_rsp(rsp);
		}
		fallthrough;
	case NVMET_RDMA_Q_LIVE:
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
		disconnect = true;
		break;
	case NVMET_RDMA_Q_DISCONNECTING:
		break;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	if (disconnect) {
		rdma_disconnect(queue->cm_id);
		queue_work(nvmet_wq, &queue->release_work);
	}
}

static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list)) {
		list_del_init(&queue->queue_list);
		disconnect = true;
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	if (disconnect)
		__nvmet_rdma_queue_disconnect(queue);
}

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	queue_work(nvmet_wq, &queue->release_work);
}

/**
 * nvmet_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id, used for nvmet port
 * @queue:	nvmet rdma queue (cm id qp_context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug. Note that this event can be generated on a normal
 * queue cm_id and/or a device bound listener cm_id (where in this
 * case queue will be null).
 *
 * We registered an ib_client to handle device removal for queues,
 * so we only need to handle the listening port cm_ids. In this case
 * we nullify the priv to prevent double cm_id destruction and destroying
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_port *port;

	if (queue) {
		/*
		 * This is a queue cm_id. we have registered
		 * an ib_client to handle queues removal
		 * so don't interfere and just return.
		 */
		return 0;
	}

	port = cm_id->context;

	/*
	 * This is a listener cm_id. Make sure that
	 * future remove_port won't invoke a double
	 * cm_id destroy. use atomic xchg to make sure
	 * we don't compete with remove_port.
	 */
	if (xchg(&port->cm_id, NULL) != cm_id)
		return 0;

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID. What a great API design..
	 */
	return 1;
}

static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		rdma_event_msg(event->event), event->event,
		event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		if (!queue) {
			struct nvmet_rdma_port *port = cm_id->context;

			queue_delayed_work(nvmet_wq, &port->repair_work, 0);
			break;
		}
		fallthrough;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		fallthrough;
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}

static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_rdma_queue *queue, *n;

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, n, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl != ctrl)
			continue;
		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
{
	struct nvmet_rdma_queue *queue, *tmp;
	struct nvmet_port *nport = port->nport;

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
				 queue_list) {
		if (queue->port != nport)
			continue;

		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);

	if (cm_id)
		rdma_destroy_id(cm_id);

	/*
	 * Destroy the remaining queues, which do not belong to any
	 * controller yet.  Doing this after the RDMA-CM ID has been
	 * destroyed guarantees that no new queue will be created.
	 */
	nvmet_rdma_destroy_port_queues(port);
}

static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
{
	struct sockaddr *addr = (struct sockaddr *)&port->addr;
	struct rdma_cm_id *cm_id;
	int ret;

	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("CM ID creation failed\n");
		return PTR_ERR(cm_id);
	}

	/*
	 * Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
	ret = rdma_set_afonly(cm_id, 1);
	if (ret) {
		pr_err("rdma_set_afonly failed (%d)\n", ret);
		goto out_destroy_id;
	}

	ret = rdma_bind_addr(cm_id, addr);
	if (ret) {
		pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
		goto out_destroy_id;
	}

	ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG);
	if (ret) {
		pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
		goto out_destroy_id;
	}

	port->cm_id = cm_id;
	return 0;

out_destroy_id:
	rdma_destroy_id(cm_id);
	return ret;
}

static void nvmet_rdma_repair_port_work(struct work_struct *w)
{
	struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
			struct nvmet_rdma_port, repair_work);
	int ret;

	nvmet_rdma_disable_port(port);
	ret = nvmet_rdma_enable_port(port);
	if (ret)
		queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
}

static int nvmet_rdma_add_port(struct nvmet_port *nport)
{
	struct nvmet_rdma_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc_obj(*port);
	if (!port)
		return -ENOMEM;

	nport->priv = port;
	port->nport = nport;
	INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
		       nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto out_free_port;
	}

	if (nport->inline_data_size < 0) {
		nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
	} else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
		pr_warn("inline_data_size %u is too large, reducing to %u\n",
			nport->inline_data_size,
			NVMET_RDMA_MAX_INLINE_DATA_SIZE);
		nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
	}

	if (nport->max_queue_size < 0) {
		nport->max_queue_size = NVME_RDMA_DEFAULT_QUEUE_SIZE;
	} else if (nport->max_queue_size > NVME_RDMA_MAX_QUEUE_SIZE) {
		pr_warn("max_queue_size %u is too large, reducing to %u\n",
			nport->max_queue_size, NVME_RDMA_MAX_QUEUE_SIZE);
		nport->max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
		       nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto out_free_port;
	}

	ret = nvmet_rdma_enable_port(port);
	if (ret)
		goto out_free_port;

	pr_info("enabling port %d (%pISpcs)\n",
		le16_to_cpu(nport->disc_addr.portid),
		(struct sockaddr *)&port->addr);

	return 0;

out_free_port:
	kfree(port);
	return ret;
}

static void nvmet_rdma_remove_port(struct nvmet_port *nport)
{
	struct nvmet_rdma_port *port = nport->priv;

	cancel_delayed_work_sync(&port->repair_work);
	nvmet_rdma_disable_port(port);
	kfree(port);
}

static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_rdma_port *port = nport->priv;
	struct rdma_cm_id *cm_id = port->cm_id;

	if (inet_addr_is_any(&cm_id->route.addr.src_addr)) {
		struct nvmet_rdma_rsp *rsp =
			container_of(req, struct nvmet_rdma_rsp, req);
		struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
		struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;

		sprintf(traddr, "%pISc", addr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static ssize_t nvmet_rdma_host_port_addr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_len)
{
	struct nvmet_sq *nvme_sq = ctrl->sqs[0];
	struct nvmet_rdma_queue *queue =
		container_of(nvme_sq, struct nvmet_rdma_queue, nvme_sq);

	return snprintf(traddr, traddr_len, "%pISc",
			(struct sockaddr *)&queue->cm_id->route.addr.dst_addr);
}

static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
{
	if (ctrl->pi_support)
		return NVMET_RDMA_MAX_METADATA_MDTS;
	return NVMET_RDMA_MAX_MDTS;
}

static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
{
	if (ctrl->pi_support)
		return NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
	return NVME_RDMA_MAX_QUEUE_SIZE;
}

static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_RDMA,
	.msdbd			= 1,
	.flags			= NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
	.add_port		= nvmet_rdma_add_port,
	.remove_port		= nvmet_rdma_remove_port,
	.queue_response		= nvmet_rdma_queue_response,
	.delete_ctrl		= nvmet_rdma_delete_ctrl,
	.disc_traddr		= nvmet_rdma_disc_port_addr,
	.host_traddr		= nvmet_rdma_host_port_addr,
	.get_mdts		= nvmet_rdma_get_mdts,
	.get_max_queue_size	= nvmet_rdma_get_max_queue_size,
};

static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvmet_rdma_queue *queue, *tmp;
	struct nvmet_rdma_device *ndev;
	bool found = false;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device == ib_device) {
			found = true;
			break;
		}
	}
	mutex_unlock(&device_list_mutex);

	if (!found)
		return;

	/*
	 * IB Device that is used by nvmet controllers is being removed,
	 * delete all queues using this device.
	 */
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
				 queue_list) {
		if (queue->dev->device != ib_device)
			continue;

		pr_info("Removing queue %d\n", queue->idx);
		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_workqueue(nvmet_wq);
}

static struct ib_client nvmet_rdma_ib_client = {
	.name	= "nvmet_rdma",
	.remove	= nvmet_rdma_remove_one
};

static int __init nvmet_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&nvmet_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_rdma_ops);
	if (ret)
		goto err_ib_client;

	return 0;

err_ib_client:
	ib_unregister_client(&nvmet_rdma_ib_client);
	return ret;
}

static void __exit nvmet_rdma_exit(void)
{
	nvmet_unregister_transport(&nvmet_rdma_ops);
	ib_unregister_client(&nvmet_rdma_ib_client);
	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
	ida_destroy(&nvmet_rdma_queue_ida);
}

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_DESCRIPTION("NVMe target RDMA transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */