// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include <linux/module.h>

#include "rtrs-srv.h"
#include "rtrs-log.h"
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
#include "rtrs-srv-trace.h"

MODULE_DESCRIPTION("RDMA Transport Server");
MODULE_LICENSE("GPL");

/* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
#define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
#define DEFAULT_SESS_QUEUE_DEPTH 512
#define MAX_HDR_SIZE PAGE_SIZE

static struct rtrs_rdma_dev_pd dev_pd;
const struct class rtrs_dev_class = {
	.name = "rtrs-server",
};
static struct rtrs_srv_ib_ctx ib_ctx;

static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;

static bool always_invalidate = true;
module_param(always_invalidate, bool, 0444);
MODULE_PARM_DESC(always_invalidate,
		 "Invalidate memory registration for contiguous memory regions before accessing.");

module_param_named(max_chunk_size, max_chunk_size, int, 0444);
MODULE_PARM_DESC(max_chunk_size,
		 "Max size of each IO request in bytes (default: "
		 __stringify(DEFAULT_MAX_CHUNK_SIZE) ")");

module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
MODULE_PARM_DESC(sess_queue_depth,
		 "Number of buffers for pending I/O requests to allocate per session. Maximum: "
		 __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
		 __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");

static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };

static struct workqueue_struct *rtrs_wq;

static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
{
	return container_of(c, struct rtrs_srv_con, c);
}

static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
				  enum rtrs_srv_state new_state)
{
	enum rtrs_srv_state old_state;
	bool changed = false;
	unsigned long flags;

	spin_lock_irqsave(&srv_path->state_lock, flags);
	old_state = srv_path->state;
	switch (new_state) {
	case RTRS_SRV_CONNECTED:
		if (old_state == RTRS_SRV_CONNECTING)
			changed = true;
		break;
	case RTRS_SRV_CLOSING:
		if (old_state == RTRS_SRV_CONNECTING ||
		    old_state == RTRS_SRV_CONNECTED)
			changed = true;
		break;
	case RTRS_SRV_CLOSED:
		if (old_state == RTRS_SRV_CLOSING)
			changed = true;
		break;
	default:
		break;
	}
	if (changed)
		srv_path->state = new_state;
	spin_unlock_irqrestore(&srv_path->state_lock, flags);

	return changed;
}

static void free_id(struct rtrs_srv_op *id)
{
	if (!id)
		return;
	kfree(id);
}

static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
{
	struct rtrs_srv_sess *srv = srv_path->srv;
	int i;

	if (srv_path->ops_ids) {
		for (i = 0; i < srv->queue_depth; i++)
			free_id(srv_path->ops_ids[i]);
		kfree(srv_path->ops_ids);
		srv_path->ops_ids = NULL;
	}
}

static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);

static struct ib_cqe io_comp_cqe = {
	.done = rtrs_srv_rdma_done
};

static inline void rtrs_srv_inflight_ref_release(struct
percpu_ref *ref) 123 { 124 struct rtrs_srv_path *srv_path = container_of(ref, 125 struct rtrs_srv_path, 126 ids_inflight_ref); 127 128 percpu_ref_exit(&srv_path->ids_inflight_ref); 129 complete(&srv_path->complete_done); 130 } 131 132 static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path) 133 { 134 struct rtrs_srv_sess *srv = srv_path->srv; 135 struct rtrs_srv_op *id; 136 int i, ret; 137 138 srv_path->ops_ids = kcalloc(srv->queue_depth, 139 sizeof(*srv_path->ops_ids), 140 GFP_KERNEL); 141 if (!srv_path->ops_ids) 142 goto err; 143 144 for (i = 0; i < srv->queue_depth; ++i) { 145 id = kzalloc(sizeof(*id), GFP_KERNEL); 146 if (!id) 147 goto err; 148 149 srv_path->ops_ids[i] = id; 150 } 151 152 ret = percpu_ref_init(&srv_path->ids_inflight_ref, 153 rtrs_srv_inflight_ref_release, 0, GFP_KERNEL); 154 if (ret) { 155 pr_err("Percpu reference init failed\n"); 156 goto err; 157 } 158 init_completion(&srv_path->complete_done); 159 160 return 0; 161 162 err: 163 rtrs_srv_free_ops_ids(srv_path); 164 return -ENOMEM; 165 } 166 167 static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path) 168 { 169 percpu_ref_get(&srv_path->ids_inflight_ref); 170 } 171 172 static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path) 173 { 174 percpu_ref_put(&srv_path->ids_inflight_ref); 175 } 176 177 static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc) 178 { 179 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); 180 struct rtrs_path *s = con->c.path; 181 struct rtrs_srv_path *srv_path = to_srv_path(s); 182 183 if (wc->status != IB_WC_SUCCESS) { 184 rtrs_err(s, "REG MR failed: %s\n", 185 ib_wc_status_msg(wc->status)); 186 close_path(srv_path); 187 return; 188 } 189 } 190 191 static struct ib_cqe local_reg_cqe = { 192 .done = rtrs_srv_reg_mr_done 193 }; 194 195 static int rdma_write_sg(struct rtrs_srv_op *id) 196 { 197 struct rtrs_path *s = id->con->c.path; 198 struct rtrs_srv_path *srv_path = to_srv_path(s); 199 dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id]; 200 struct rtrs_srv_mr *srv_mr; 201 struct ib_send_wr inv_wr; 202 struct ib_rdma_wr imm_wr; 203 struct ib_rdma_wr *wr = NULL; 204 enum ib_send_flags flags; 205 size_t sg_cnt; 206 int err, offset; 207 bool need_inval; 208 u32 rkey = 0; 209 struct ib_reg_wr rwr; 210 struct ib_sge *plist; 211 struct ib_sge list; 212 213 sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt); 214 need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F; 215 if (sg_cnt != 1) 216 return -EINVAL; 217 218 offset = 0; 219 220 wr = &id->tx_wr; 221 plist = &id->tx_sg; 222 plist->addr = dma_addr + offset; 223 plist->length = le32_to_cpu(id->rd_msg->desc[0].len); 224 225 /* WR will fail with length error 226 * if this is 0 227 */ 228 if (plist->length == 0) { 229 rtrs_err(s, "Invalid RDMA-Write sg list length 0\n"); 230 return -EINVAL; 231 } 232 233 plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey; 234 offset += plist->length; 235 236 wr->wr.sg_list = plist; 237 wr->wr.num_sge = 1; 238 wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr); 239 wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key); 240 if (rkey == 0) 241 rkey = wr->rkey; 242 else 243 /* Only one key is actually used */ 244 WARN_ON_ONCE(rkey != wr->rkey); 245 246 wr->wr.opcode = IB_WR_RDMA_WRITE; 247 wr->wr.wr_cqe = &io_comp_cqe; 248 wr->wr.ex.imm_data = 0; 249 wr->wr.send_flags = 0; 250 251 if (need_inval && always_invalidate) { 252 wr->wr.next = &rwr.wr; 253 rwr.wr.next = &inv_wr; 254 inv_wr.next = &imm_wr.wr; 255 } else if (always_invalidate) { 256 wr->wr.next = 
&rwr.wr; 257 rwr.wr.next = &imm_wr.wr; 258 } else if (need_inval) { 259 wr->wr.next = &inv_wr; 260 inv_wr.next = &imm_wr.wr; 261 } else { 262 wr->wr.next = &imm_wr.wr; 263 } 264 /* 265 * From time to time we have to post signaled sends, 266 * or send queue will fill up and only QP reset can help. 267 */ 268 flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ? 269 0 : IB_SEND_SIGNALED; 270 271 if (need_inval) { 272 inv_wr.sg_list = NULL; 273 inv_wr.num_sge = 0; 274 inv_wr.opcode = IB_WR_SEND_WITH_INV; 275 inv_wr.wr_cqe = &io_comp_cqe; 276 inv_wr.send_flags = 0; 277 inv_wr.ex.invalidate_rkey = rkey; 278 } 279 280 imm_wr.wr.next = NULL; 281 if (always_invalidate) { 282 struct rtrs_msg_rkey_rsp *msg; 283 284 srv_mr = &srv_path->mrs[id->msg_id]; 285 rwr.wr.opcode = IB_WR_REG_MR; 286 rwr.wr.wr_cqe = &local_reg_cqe; 287 rwr.wr.num_sge = 0; 288 rwr.mr = srv_mr->mr; 289 rwr.wr.send_flags = 0; 290 rwr.key = srv_mr->mr->rkey; 291 rwr.access = (IB_ACCESS_LOCAL_WRITE | 292 IB_ACCESS_REMOTE_WRITE); 293 msg = srv_mr->iu->buf; 294 msg->buf_id = cpu_to_le16(id->msg_id); 295 msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP); 296 msg->rkey = cpu_to_le32(srv_mr->mr->rkey); 297 298 list.addr = srv_mr->iu->dma_addr; 299 list.length = sizeof(*msg); 300 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; 301 imm_wr.wr.sg_list = &list; 302 imm_wr.wr.num_sge = 1; 303 imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM; 304 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, 305 srv_mr->iu->dma_addr, 306 srv_mr->iu->size, DMA_TO_DEVICE); 307 } else { 308 imm_wr.wr.sg_list = NULL; 309 imm_wr.wr.num_sge = 0; 310 imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM; 311 } 312 imm_wr.wr.send_flags = flags; 313 imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id, 314 0, need_inval)); 315 316 imm_wr.wr.wr_cqe = &io_comp_cqe; 317 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr, 318 offset, DMA_BIDIRECTIONAL); 319 320 err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL); 321 if (err) 322 rtrs_err(s, 323 "Posting RDMA-Write-Request to QP failed, err: %d\n", 324 err); 325 326 return err; 327 } 328 329 /** 330 * send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE 331 * requests or on successful WRITE request. 332 * @con: the connection to send back result 333 * @id: the id associated with the IO 334 * @errno: the error number of the IO. 335 * 336 * Return 0 on success, errno otherwise. 
337 */ 338 static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, 339 int errno) 340 { 341 struct rtrs_path *s = con->c.path; 342 struct rtrs_srv_path *srv_path = to_srv_path(s); 343 struct ib_send_wr inv_wr, *wr = NULL; 344 struct ib_rdma_wr imm_wr; 345 struct ib_reg_wr rwr; 346 struct rtrs_srv_mr *srv_mr; 347 bool need_inval = false; 348 enum ib_send_flags flags; 349 u32 imm; 350 int err; 351 352 if (id->dir == READ) { 353 struct rtrs_msg_rdma_read *rd_msg = id->rd_msg; 354 size_t sg_cnt; 355 356 need_inval = le16_to_cpu(rd_msg->flags) & 357 RTRS_MSG_NEED_INVAL_F; 358 sg_cnt = le16_to_cpu(rd_msg->sg_cnt); 359 360 if (need_inval) { 361 if (sg_cnt) { 362 inv_wr.wr_cqe = &io_comp_cqe; 363 inv_wr.sg_list = NULL; 364 inv_wr.num_sge = 0; 365 inv_wr.opcode = IB_WR_SEND_WITH_INV; 366 inv_wr.send_flags = 0; 367 /* Only one key is actually used */ 368 inv_wr.ex.invalidate_rkey = 369 le32_to_cpu(rd_msg->desc[0].key); 370 } else { 371 WARN_ON_ONCE(1); 372 need_inval = false; 373 } 374 } 375 } 376 377 trace_send_io_resp_imm(id, need_inval, always_invalidate, errno); 378 379 if (need_inval && always_invalidate) { 380 wr = &inv_wr; 381 inv_wr.next = &rwr.wr; 382 rwr.wr.next = &imm_wr.wr; 383 } else if (always_invalidate) { 384 wr = &rwr.wr; 385 rwr.wr.next = &imm_wr.wr; 386 } else if (need_inval) { 387 wr = &inv_wr; 388 inv_wr.next = &imm_wr.wr; 389 } else { 390 wr = &imm_wr.wr; 391 } 392 /* 393 * From time to time we have to post signalled sends, 394 * or send queue will fill up and only QP reset can help. 395 */ 396 flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ? 397 0 : IB_SEND_SIGNALED; 398 imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval); 399 imm_wr.wr.next = NULL; 400 if (always_invalidate) { 401 struct ib_sge list; 402 struct rtrs_msg_rkey_rsp *msg; 403 404 srv_mr = &srv_path->mrs[id->msg_id]; 405 rwr.wr.next = &imm_wr.wr; 406 rwr.wr.opcode = IB_WR_REG_MR; 407 rwr.wr.wr_cqe = &local_reg_cqe; 408 rwr.wr.num_sge = 0; 409 rwr.wr.send_flags = 0; 410 rwr.mr = srv_mr->mr; 411 rwr.key = srv_mr->mr->rkey; 412 rwr.access = (IB_ACCESS_LOCAL_WRITE | 413 IB_ACCESS_REMOTE_WRITE); 414 msg = srv_mr->iu->buf; 415 msg->buf_id = cpu_to_le16(id->msg_id); 416 msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP); 417 msg->rkey = cpu_to_le32(srv_mr->mr->rkey); 418 419 list.addr = srv_mr->iu->dma_addr; 420 list.length = sizeof(*msg); 421 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; 422 imm_wr.wr.sg_list = &list; 423 imm_wr.wr.num_sge = 1; 424 imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM; 425 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, 426 srv_mr->iu->dma_addr, 427 srv_mr->iu->size, DMA_TO_DEVICE); 428 } else { 429 imm_wr.wr.sg_list = NULL; 430 imm_wr.wr.num_sge = 0; 431 imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM; 432 } 433 imm_wr.wr.send_flags = flags; 434 imm_wr.wr.wr_cqe = &io_comp_cqe; 435 436 imm_wr.wr.ex.imm_data = cpu_to_be32(imm); 437 438 err = ib_post_send(id->con->c.qp, wr, NULL); 439 if (err) 440 rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n", 441 err); 442 443 return err; 444 } 445 446 void close_path(struct rtrs_srv_path *srv_path) 447 { 448 if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING)) 449 queue_work(rtrs_wq, &srv_path->close_work); 450 WARN_ON(srv_path->state != RTRS_SRV_CLOSING); 451 } 452 453 static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state) 454 { 455 switch (state) { 456 case RTRS_SRV_CONNECTING: 457 return "RTRS_SRV_CONNECTING"; 458 case RTRS_SRV_CONNECTED: 459 return "RTRS_SRV_CONNECTED"; 460 case 
RTRS_SRV_CLOSING:
		return "RTRS_SRV_CLOSING";
	case RTRS_SRV_CLOSED:
		return "RTRS_SRV_CLOSED";
	default:
		return "UNKNOWN";
	}
}

/**
 * rtrs_srv_resp_rdma() - Finish an RDMA request
 *
 * @id:		Internal RTRS operation identifier
 * @status:	Response Code sent to the other side for this operation.
 *		0 = success, < 0 error
 * Context: any
 *
 * Finish an RDMA operation. A message is sent to the client and the
 * corresponding memory areas will be released.
 */
bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
{
	struct rtrs_srv_path *srv_path;
	struct rtrs_srv_con *con;
	struct rtrs_path *s;
	int err;

	if (WARN_ON(!id))
		return true;

	con = id->con;
	s = con->c.path;
	srv_path = to_srv_path(s);

	id->status = status;

	if (srv_path->state != RTRS_SRV_CONNECTED) {
		rtrs_err_rl(s,
			    "Sending I/O response failed, server path %s is disconnected, path state %s\n",
			    kobject_name(&srv_path->kobj),
			    rtrs_srv_state_str(srv_path->state));
		goto out;
	}
	if (always_invalidate) {
		struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id];

		ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
	}
	if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) {
		rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n",
			 kobject_name(&srv_path->kobj),
			 con->c.cid);
		atomic_add(1, &con->c.sq_wr_avail);
		spin_lock(&con->rsp_wr_wait_lock);
		list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
		spin_unlock(&con->rsp_wr_wait_lock);
		return false;
	}

	if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
		err = send_io_resp_imm(con, id, status);
	else
		err = rdma_write_sg(id);

	if (err) {
		rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err,
			    kobject_name(&srv_path->kobj));
		close_path(srv_path);
	}
out:
	rtrs_srv_put_ops_ids(srv_path);
	return true;
}
EXPORT_SYMBOL(rtrs_srv_resp_rdma);

/**
 * rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
 * @srv:	Session pointer
 * @priv:	The private pointer that is associated with the session.
 */
void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv)
{
	srv->priv = priv;
}
EXPORT_SYMBOL(rtrs_srv_set_sess_priv);

static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
{
	int i;

	for (i = 0; i < srv_path->mrs_num; i++) {
		struct rtrs_srv_mr *srv_mr;

		srv_mr = &srv_path->mrs[i];

		if (always_invalidate)
			rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);

		ib_dereg_mr(srv_mr->mr);
		ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
				srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
		sg_free_table(&srv_mr->sgt);
	}
	kfree(srv_path->mrs);
}

static int map_cont_bufs(struct rtrs_srv_path *srv_path)
{
	struct rtrs_srv_sess *srv = srv_path->srv;
	struct rtrs_path *ss = &srv_path->s;
	int i, err, mrs_num;
	unsigned int chunk_bits;
	int chunks_per_mr = 1;
	struct ib_mr *mr;
	struct sg_table *sgt;

	/*
	 * Here we map queue_depth chunks to MRs. First we have to
	 * figure out how many chunks we can map per MR.
	 */
	if (always_invalidate) {
		/*
		 * In order to invalidate each chunk of memory separately, we need
		 * more memory regions.
584 */ 585 mrs_num = srv->queue_depth; 586 } else { 587 chunks_per_mr = 588 srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len; 589 mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr); 590 chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num); 591 } 592 593 srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL); 594 if (!srv_path->mrs) 595 return -ENOMEM; 596 597 for (srv_path->mrs_num = 0; srv_path->mrs_num < mrs_num; 598 srv_path->mrs_num++) { 599 struct rtrs_srv_mr *srv_mr = &srv_path->mrs[srv_path->mrs_num]; 600 struct scatterlist *s; 601 int nr, nr_sgt, chunks; 602 603 sgt = &srv_mr->sgt; 604 chunks = chunks_per_mr * srv_path->mrs_num; 605 if (!always_invalidate) 606 chunks_per_mr = min_t(int, chunks_per_mr, 607 srv->queue_depth - chunks); 608 609 err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL); 610 if (err) 611 goto err; 612 613 for_each_sg(sgt->sgl, s, chunks_per_mr, i) 614 sg_set_page(s, srv->chunks[chunks + i], 615 max_chunk_size, 0); 616 617 nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl, 618 sgt->nents, DMA_BIDIRECTIONAL); 619 if (!nr_sgt) { 620 err = -EINVAL; 621 goto free_sg; 622 } 623 mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, 624 nr_sgt); 625 if (IS_ERR(mr)) { 626 err = PTR_ERR(mr); 627 goto unmap_sg; 628 } 629 nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt, 630 NULL, max_chunk_size); 631 if (nr != nr_sgt) { 632 err = nr < 0 ? nr : -EINVAL; 633 goto dereg_mr; 634 } 635 636 if (always_invalidate) { 637 srv_mr->iu = rtrs_iu_alloc(1, 638 sizeof(struct rtrs_msg_rkey_rsp), 639 GFP_KERNEL, srv_path->s.dev->ib_dev, 640 DMA_TO_DEVICE, rtrs_srv_rdma_done); 641 if (!srv_mr->iu) { 642 err = -ENOMEM; 643 rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err); 644 goto dereg_mr; 645 } 646 } 647 /* Eventually dma addr for each chunk can be cached */ 648 for_each_sg(sgt->sgl, s, nr_sgt, i) 649 srv_path->dma_addr[chunks + i] = sg_dma_address(s); 650 651 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); 652 srv_mr->mr = mr; 653 } 654 655 chunk_bits = ilog2(srv->queue_depth - 1) + 1; 656 srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits); 657 658 return 0; 659 660 dereg_mr: 661 ib_dereg_mr(mr); 662 unmap_sg: 663 ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl, 664 sgt->nents, DMA_BIDIRECTIONAL); 665 free_sg: 666 sg_free_table(sgt); 667 err: 668 unmap_cont_bufs(srv_path); 669 670 return err; 671 } 672 673 static void rtrs_srv_hb_err_handler(struct rtrs_con *c) 674 { 675 close_path(to_srv_path(c->path)); 676 } 677 678 static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path) 679 { 680 rtrs_init_hb(&srv_path->s, &io_comp_cqe, 681 RTRS_HB_INTERVAL_MS, 682 RTRS_HB_MISSED_MAX, 683 rtrs_srv_hb_err_handler, 684 rtrs_wq); 685 } 686 687 static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path) 688 { 689 rtrs_start_hb(&srv_path->s); 690 } 691 692 static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path) 693 { 694 rtrs_stop_hb(&srv_path->s); 695 } 696 697 static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) 698 { 699 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); 700 struct rtrs_path *s = con->c.path; 701 struct rtrs_srv_path *srv_path = to_srv_path(s); 702 struct rtrs_iu *iu; 703 704 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); 705 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); 706 707 if (wc->status != IB_WC_SUCCESS) { 708 rtrs_err(s, "Sess info response send failed: %s\n", 709 ib_wc_status_msg(wc->status)); 710 close_path(srv_path); 711 return; 712 } 713 WARN_ON(wc->opcode != IB_WC_SEND); 714 } 715 716 
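/*
 * Worked example for the immediate-data layout set up at the end of
 * map_cont_bufs() above and unpacked again in rtrs_srv_rdma_done() below,
 * assuming the default module parameters (sess_queue_depth = 512,
 * max_chunk_size = 128 KiB) and MAX_IMM_PAYL_BITS = 28:
 *
 *	chunk_bits = ilog2(512 - 1) + 1 = 9
 *	mem_bits   = MAX_IMM_PAYL_BITS - chunk_bits = 19
 *
 * i.e. the immediate payload carries the buffer id in its upper 9 bits and
 * the byte offset inside the 128 KiB chunk in its lower 19 bits:
 *
 *	msg_id = imm_payload >> mem_bits;
 *	off    = imm_payload & ((1 << mem_bits) - 1);
 */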
static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path) 717 { 718 struct rtrs_srv_sess *srv = srv_path->srv; 719 struct rtrs_srv_ctx *ctx = srv->ctx; 720 int up, ret = 0; 721 722 mutex_lock(&srv->paths_ev_mutex); 723 up = ++srv->paths_up; 724 if (up == 1) 725 ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL); 726 mutex_unlock(&srv->paths_ev_mutex); 727 728 /* Mark session as established */ 729 if (!ret) 730 srv_path->established = true; 731 732 return ret; 733 } 734 735 static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path) 736 { 737 struct rtrs_srv_sess *srv = srv_path->srv; 738 struct rtrs_srv_ctx *ctx = srv->ctx; 739 740 if (!srv_path->established) 741 return; 742 743 srv_path->established = false; 744 mutex_lock(&srv->paths_ev_mutex); 745 WARN_ON(!srv->paths_up); 746 if (--srv->paths_up == 0) 747 ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv); 748 mutex_unlock(&srv->paths_ev_mutex); 749 } 750 751 static bool exist_pathname(struct rtrs_srv_ctx *ctx, 752 const char *pathname, const uuid_t *path_uuid) 753 { 754 struct rtrs_srv_sess *srv; 755 struct rtrs_srv_path *srv_path; 756 bool found = false; 757 758 mutex_lock(&ctx->srv_mutex); 759 list_for_each_entry(srv, &ctx->srv_list, ctx_list) { 760 mutex_lock(&srv->paths_mutex); 761 762 /* when a client with same uuid and same sessname tried to add a path */ 763 if (uuid_equal(&srv->paths_uuid, path_uuid)) { 764 mutex_unlock(&srv->paths_mutex); 765 continue; 766 } 767 768 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { 769 if (strlen(srv_path->s.sessname) == strlen(pathname) && 770 !strcmp(srv_path->s.sessname, pathname)) { 771 found = true; 772 break; 773 } 774 } 775 mutex_unlock(&srv->paths_mutex); 776 if (found) 777 break; 778 } 779 mutex_unlock(&ctx->srv_mutex); 780 return found; 781 } 782 783 static int post_recv_path(struct rtrs_srv_path *srv_path); 784 static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno); 785 786 static int process_info_req(struct rtrs_srv_con *con, 787 struct rtrs_msg_info_req *msg) 788 { 789 struct rtrs_path *s = con->c.path; 790 struct rtrs_srv_path *srv_path = to_srv_path(s); 791 struct ib_send_wr *reg_wr = NULL; 792 struct rtrs_msg_info_rsp *rsp; 793 struct rtrs_iu *tx_iu; 794 struct ib_reg_wr *rwr; 795 int mri, err; 796 size_t tx_sz; 797 798 err = post_recv_path(srv_path); 799 if (err) { 800 rtrs_err(s, "post_recv_path(), err: %d\n", err); 801 return err; 802 } 803 804 if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) { 805 rtrs_err(s, "pathname cannot contain / and .\n"); 806 return -EINVAL; 807 } 808 809 if (exist_pathname(srv_path->srv->ctx, 810 msg->pathname, &srv_path->srv->paths_uuid)) { 811 rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname); 812 return -EPERM; 813 } 814 strscpy(srv_path->s.sessname, msg->pathname, 815 sizeof(srv_path->s.sessname)); 816 817 rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL); 818 if (!rwr) 819 return -ENOMEM; 820 821 tx_sz = sizeof(*rsp); 822 tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num; 823 tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev, 824 DMA_TO_DEVICE, rtrs_srv_info_rsp_done); 825 if (!tx_iu) { 826 err = -ENOMEM; 827 goto rwr_free; 828 } 829 830 rsp = tx_iu->buf; 831 rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP); 832 rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num); 833 834 for (mri = 0; mri < srv_path->mrs_num; mri++) { 835 struct ib_mr *mr = srv_path->mrs[mri].mr; 836 837 rsp->desc[mri].addr = cpu_to_le64(mr->iova); 838 rsp->desc[mri].key = 
cpu_to_le32(mr->rkey); 839 rsp->desc[mri].len = cpu_to_le32(mr->length); 840 841 /* 842 * Fill in reg MR request and chain them *backwards* 843 */ 844 rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL; 845 rwr[mri].wr.opcode = IB_WR_REG_MR; 846 rwr[mri].wr.wr_cqe = &local_reg_cqe; 847 rwr[mri].wr.num_sge = 0; 848 rwr[mri].wr.send_flags = 0; 849 rwr[mri].mr = mr; 850 rwr[mri].key = mr->rkey; 851 rwr[mri].access = (IB_ACCESS_LOCAL_WRITE | 852 IB_ACCESS_REMOTE_WRITE); 853 reg_wr = &rwr[mri].wr; 854 } 855 856 err = rtrs_srv_create_path_files(srv_path); 857 if (err) 858 goto iu_free; 859 kobject_get(&srv_path->kobj); 860 get_device(&srv_path->srv->dev); 861 err = rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED); 862 if (!err) { 863 rtrs_err(s, "rtrs_srv_change_state(), err: %d\n", err); 864 goto iu_free; 865 } 866 867 rtrs_srv_start_hb(srv_path); 868 869 /* 870 * We do not account number of established connections at the current 871 * moment, we rely on the client, which should send info request when 872 * all connections are successfully established. Thus, simply notify 873 * listener with a proper event if we are the first path. 874 */ 875 err = rtrs_srv_path_up(srv_path); 876 if (err) { 877 rtrs_err(s, "rtrs_srv_path_up(), err: %d\n", err); 878 goto iu_free; 879 } 880 881 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, 882 tx_iu->dma_addr, 883 tx_iu->size, DMA_TO_DEVICE); 884 885 /* Send info response */ 886 err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr); 887 if (err) { 888 rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err); 889 iu_free: 890 rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1); 891 } 892 rwr_free: 893 kfree(rwr); 894 895 return err; 896 } 897 898 static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc) 899 { 900 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); 901 struct rtrs_path *s = con->c.path; 902 struct rtrs_srv_path *srv_path = to_srv_path(s); 903 struct rtrs_msg_info_req *msg; 904 struct rtrs_iu *iu; 905 int err; 906 907 WARN_ON(con->c.cid); 908 909 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); 910 if (wc->status != IB_WC_SUCCESS) { 911 rtrs_err(s, "Sess info request receive failed: %s\n", 912 ib_wc_status_msg(wc->status)); 913 goto close; 914 } 915 WARN_ON(wc->opcode != IB_WC_RECV); 916 917 if (wc->byte_len < sizeof(*msg)) { 918 rtrs_err(s, "Sess info request is malformed: size %d\n", 919 wc->byte_len); 920 goto close; 921 } 922 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr, 923 iu->size, DMA_FROM_DEVICE); 924 msg = iu->buf; 925 if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) { 926 rtrs_err(s, "Sess info request is malformed: type %d\n", 927 le16_to_cpu(msg->type)); 928 goto close; 929 } 930 err = process_info_req(con, msg); 931 if (err) 932 goto close; 933 934 out: 935 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); 936 return; 937 close: 938 close_path(srv_path); 939 goto out; 940 } 941 942 static int post_recv_info_req(struct rtrs_srv_con *con) 943 { 944 struct rtrs_path *s = con->c.path; 945 struct rtrs_srv_path *srv_path = to_srv_path(s); 946 struct rtrs_iu *rx_iu; 947 int err; 948 949 rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), 950 GFP_KERNEL, srv_path->s.dev->ib_dev, 951 DMA_FROM_DEVICE, rtrs_srv_info_req_done); 952 if (!rx_iu) 953 return -ENOMEM; 954 /* Prepare for getting info response */ 955 err = rtrs_iu_post_recv(&con->c, rx_iu); 956 if (err) { 957 rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err); 958 rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1); 959 return err; 960 } 961 962 
return 0; 963 } 964 965 static int post_recv_io(struct rtrs_srv_con *con, size_t q_size) 966 { 967 int i, err; 968 969 for (i = 0; i < q_size; i++) { 970 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); 971 if (err) 972 return err; 973 } 974 975 return 0; 976 } 977 978 static int post_recv_path(struct rtrs_srv_path *srv_path) 979 { 980 struct rtrs_srv_sess *srv = srv_path->srv; 981 struct rtrs_path *s = &srv_path->s; 982 size_t q_size; 983 int err, cid; 984 985 for (cid = 0; cid < srv_path->s.con_num; cid++) { 986 if (cid == 0) 987 q_size = SERVICE_CON_QUEUE_DEPTH; 988 else 989 q_size = srv->queue_depth; 990 991 err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size); 992 if (err) { 993 rtrs_err(s, "post_recv_io(), err: %d\n", err); 994 return err; 995 } 996 } 997 998 return 0; 999 } 1000 1001 static void process_read(struct rtrs_srv_con *con, 1002 struct rtrs_msg_rdma_read *msg, 1003 u32 buf_id, u32 off) 1004 { 1005 struct rtrs_path *s = con->c.path; 1006 struct rtrs_srv_path *srv_path = to_srv_path(s); 1007 struct rtrs_srv_sess *srv = srv_path->srv; 1008 struct rtrs_srv_ctx *ctx = srv->ctx; 1009 struct rtrs_srv_op *id; 1010 1011 size_t usr_len, data_len; 1012 void *data; 1013 int ret; 1014 1015 if (srv_path->state != RTRS_SRV_CONNECTED) { 1016 rtrs_err_rl(s, 1017 "Processing read request failed, session is disconnected, sess state %s\n", 1018 rtrs_srv_state_str(srv_path->state)); 1019 return; 1020 } 1021 if (msg->sg_cnt != 1 && msg->sg_cnt != 0) { 1022 rtrs_err_rl(s, 1023 "Processing read request failed, invalid message\n"); 1024 return; 1025 } 1026 rtrs_srv_get_ops_ids(srv_path); 1027 rtrs_srv_update_rdma_stats(srv_path->stats, off, READ); 1028 id = srv_path->ops_ids[buf_id]; 1029 id->con = con; 1030 id->dir = READ; 1031 id->msg_id = buf_id; 1032 id->rd_msg = msg; 1033 usr_len = le16_to_cpu(msg->usr_len); 1034 data_len = off - usr_len; 1035 data = page_address(srv->chunks[buf_id]); 1036 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len, 1037 data + data_len, usr_len); 1038 1039 if (ret) { 1040 rtrs_err_rl(s, 1041 "Processing read request failed, user module cb reported for msg_id %d, err: %d\n", 1042 buf_id, ret); 1043 goto send_err_msg; 1044 } 1045 1046 return; 1047 1048 send_err_msg: 1049 ret = send_io_resp_imm(con, id, ret); 1050 if (ret < 0) { 1051 rtrs_err_rl(s, 1052 "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n", 1053 buf_id, ret); 1054 close_path(srv_path); 1055 } 1056 rtrs_srv_put_ops_ids(srv_path); 1057 } 1058 1059 static void process_write(struct rtrs_srv_con *con, 1060 struct rtrs_msg_rdma_write *req, 1061 u32 buf_id, u32 off) 1062 { 1063 struct rtrs_path *s = con->c.path; 1064 struct rtrs_srv_path *srv_path = to_srv_path(s); 1065 struct rtrs_srv_sess *srv = srv_path->srv; 1066 struct rtrs_srv_ctx *ctx = srv->ctx; 1067 struct rtrs_srv_op *id; 1068 1069 size_t data_len, usr_len; 1070 void *data; 1071 int ret; 1072 1073 if (srv_path->state != RTRS_SRV_CONNECTED) { 1074 rtrs_err_rl(s, 1075 "Processing write request failed, session is disconnected, sess state %s\n", 1076 rtrs_srv_state_str(srv_path->state)); 1077 return; 1078 } 1079 rtrs_srv_get_ops_ids(srv_path); 1080 rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE); 1081 id = srv_path->ops_ids[buf_id]; 1082 id->con = con; 1083 id->dir = WRITE; 1084 id->msg_id = buf_id; 1085 1086 usr_len = le16_to_cpu(req->usr_len); 1087 data_len = off - usr_len; 1088 data = page_address(srv->chunks[buf_id]); 1089 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len, 1090 data + data_len, usr_len); 1091 
if (ret) { 1092 rtrs_err_rl(s, 1093 "Processing write request failed, user module callback reports err: %d\n", 1094 ret); 1095 goto send_err_msg; 1096 } 1097 1098 return; 1099 1100 send_err_msg: 1101 ret = send_io_resp_imm(con, id, ret); 1102 if (ret < 0) { 1103 rtrs_err_rl(s, 1104 "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n", 1105 buf_id, ret); 1106 close_path(srv_path); 1107 } 1108 rtrs_srv_put_ops_ids(srv_path); 1109 } 1110 1111 static void process_io_req(struct rtrs_srv_con *con, void *msg, 1112 u32 id, u32 off) 1113 { 1114 struct rtrs_path *s = con->c.path; 1115 struct rtrs_srv_path *srv_path = to_srv_path(s); 1116 struct rtrs_msg_rdma_hdr *hdr; 1117 unsigned int type; 1118 1119 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, 1120 srv_path->dma_addr[id], 1121 max_chunk_size, DMA_BIDIRECTIONAL); 1122 hdr = msg; 1123 type = le16_to_cpu(hdr->type); 1124 1125 switch (type) { 1126 case RTRS_MSG_WRITE: 1127 process_write(con, msg, id, off); 1128 break; 1129 case RTRS_MSG_READ: 1130 process_read(con, msg, id, off); 1131 break; 1132 default: 1133 rtrs_err(s, 1134 "Processing I/O request failed, unknown message type received: 0x%02x\n", 1135 type); 1136 goto err; 1137 } 1138 1139 return; 1140 1141 err: 1142 close_path(srv_path); 1143 } 1144 1145 static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) 1146 { 1147 struct rtrs_srv_mr *mr = 1148 container_of(wc->wr_cqe, typeof(*mr), inv_cqe); 1149 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); 1150 struct rtrs_path *s = con->c.path; 1151 struct rtrs_srv_path *srv_path = to_srv_path(s); 1152 struct rtrs_srv_sess *srv = srv_path->srv; 1153 u32 msg_id, off; 1154 void *data; 1155 1156 if (wc->status != IB_WC_SUCCESS) { 1157 rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n", 1158 ib_wc_status_msg(wc->status)); 1159 close_path(srv_path); 1160 } 1161 msg_id = mr->msg_id; 1162 off = mr->msg_off; 1163 data = page_address(srv->chunks[msg_id]) + off; 1164 process_io_req(con, data, msg_id, off); 1165 } 1166 1167 static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con, 1168 struct rtrs_srv_mr *mr) 1169 { 1170 struct ib_send_wr wr = { 1171 .opcode = IB_WR_LOCAL_INV, 1172 .wr_cqe = &mr->inv_cqe, 1173 .send_flags = IB_SEND_SIGNALED, 1174 .ex.invalidate_rkey = mr->mr->rkey, 1175 }; 1176 mr->inv_cqe.done = rtrs_srv_inv_rkey_done; 1177 1178 return ib_post_send(con->c.qp, &wr, NULL); 1179 } 1180 1181 static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con) 1182 { 1183 spin_lock(&con->rsp_wr_wait_lock); 1184 while (!list_empty(&con->rsp_wr_wait_list)) { 1185 struct rtrs_srv_op *id; 1186 int ret; 1187 1188 id = list_entry(con->rsp_wr_wait_list.next, 1189 struct rtrs_srv_op, wait_list); 1190 list_del(&id->wait_list); 1191 1192 spin_unlock(&con->rsp_wr_wait_lock); 1193 ret = rtrs_srv_resp_rdma(id, id->status); 1194 spin_lock(&con->rsp_wr_wait_lock); 1195 1196 if (!ret) { 1197 list_add(&id->wait_list, &con->rsp_wr_wait_list); 1198 break; 1199 } 1200 } 1201 spin_unlock(&con->rsp_wr_wait_lock); 1202 } 1203 1204 static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) 1205 { 1206 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); 1207 struct rtrs_path *s = con->c.path; 1208 struct rtrs_srv_path *srv_path = to_srv_path(s); 1209 struct rtrs_srv_sess *srv = srv_path->srv; 1210 u32 imm_type, imm_payload; 1211 int err; 1212 1213 if (wc->status != IB_WC_SUCCESS) { 1214 if (wc->status != IB_WC_WR_FLUSH_ERR) { 1215 rtrs_err(s, 1216 "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n", 
1217 ib_wc_status_msg(wc->status), wc->wr_cqe, 1218 wc->opcode, wc->vendor_err, wc->byte_len); 1219 close_path(srv_path); 1220 } 1221 return; 1222 } 1223 1224 switch (wc->opcode) { 1225 case IB_WC_RECV_RDMA_WITH_IMM: 1226 /* 1227 * post_recv() RDMA write completions of IO reqs (read/write) 1228 * and hb 1229 */ 1230 if (WARN_ON(wc->wr_cqe != &io_comp_cqe)) 1231 return; 1232 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); 1233 if (err) { 1234 rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); 1235 close_path(srv_path); 1236 break; 1237 } 1238 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), 1239 &imm_type, &imm_payload); 1240 if (imm_type == RTRS_IO_REQ_IMM) { 1241 u32 msg_id, off; 1242 void *data; 1243 1244 msg_id = imm_payload >> srv_path->mem_bits; 1245 off = imm_payload & ((1 << srv_path->mem_bits) - 1); 1246 if (msg_id >= srv->queue_depth || off >= max_chunk_size) { 1247 rtrs_err(s, "Wrong msg_id %u, off %u\n", 1248 msg_id, off); 1249 close_path(srv_path); 1250 return; 1251 } 1252 if (always_invalidate) { 1253 struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id]; 1254 1255 mr->msg_off = off; 1256 mr->msg_id = msg_id; 1257 err = rtrs_srv_inv_rkey(con, mr); 1258 if (err) { 1259 rtrs_err(s, "rtrs_post_recv(), err: %d\n", 1260 err); 1261 close_path(srv_path); 1262 break; 1263 } 1264 } else { 1265 data = page_address(srv->chunks[msg_id]) + off; 1266 process_io_req(con, data, msg_id, off); 1267 } 1268 } else if (imm_type == RTRS_HB_MSG_IMM) { 1269 WARN_ON(con->c.cid); 1270 rtrs_send_hb_ack(&srv_path->s); 1271 } else if (imm_type == RTRS_HB_ACK_IMM) { 1272 WARN_ON(con->c.cid); 1273 srv_path->s.hb_missed_cnt = 0; 1274 } else { 1275 rtrs_wrn(s, "Unknown IMM type %u\n", imm_type); 1276 } 1277 break; 1278 case IB_WC_RDMA_WRITE: 1279 case IB_WC_SEND: 1280 /* 1281 * post_send() RDMA write completions of IO reqs (read/write) 1282 * and hb. 1283 */ 1284 atomic_add(s->signal_interval, &con->c.sq_wr_avail); 1285 1286 if (!list_empty_careful(&con->rsp_wr_wait_list)) 1287 rtrs_rdma_process_wr_wait_list(con); 1288 1289 break; 1290 default: 1291 rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode); 1292 return; 1293 } 1294 } 1295 1296 /** 1297 * rtrs_srv_get_path_name() - Get rtrs_srv peer hostname. 1298 * @srv: Session 1299 * @pathname: Pathname buffer 1300 * @len: Length of sessname buffer 1301 */ 1302 int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname, 1303 size_t len) 1304 { 1305 struct rtrs_srv_path *srv_path; 1306 int err = -ENOTCONN; 1307 1308 mutex_lock(&srv->paths_mutex); 1309 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { 1310 if (srv_path->state != RTRS_SRV_CONNECTED) 1311 continue; 1312 strscpy(pathname, srv_path->s.sessname, 1313 min_t(size_t, sizeof(srv_path->s.sessname), len)); 1314 err = 0; 1315 break; 1316 } 1317 mutex_unlock(&srv->paths_mutex); 1318 1319 return err; 1320 } 1321 EXPORT_SYMBOL(rtrs_srv_get_path_name); 1322 1323 /** 1324 * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth. 
1325 * @srv: Session 1326 */ 1327 int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv) 1328 { 1329 return srv->queue_depth; 1330 } 1331 EXPORT_SYMBOL(rtrs_srv_get_queue_depth); 1332 1333 static int find_next_bit_ring(struct rtrs_srv_path *srv_path) 1334 { 1335 struct ib_device *ib_dev = srv_path->s.dev->ib_dev; 1336 int v; 1337 1338 v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask); 1339 if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors) 1340 v = cpumask_first(&cq_affinity_mask); 1341 return v; 1342 } 1343 1344 static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path) 1345 { 1346 srv_path->cur_cq_vector = find_next_bit_ring(srv_path); 1347 1348 return srv_path->cur_cq_vector; 1349 } 1350 1351 static void rtrs_srv_dev_release(struct device *dev) 1352 { 1353 struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess, 1354 dev); 1355 1356 kfree(srv); 1357 } 1358 1359 static void free_srv(struct rtrs_srv_sess *srv) 1360 { 1361 int i; 1362 1363 WARN_ON(refcount_read(&srv->refcount)); 1364 for (i = 0; i < srv->queue_depth; i++) 1365 __free_pages(srv->chunks[i], get_order(max_chunk_size)); 1366 kfree(srv->chunks); 1367 mutex_destroy(&srv->paths_mutex); 1368 mutex_destroy(&srv->paths_ev_mutex); 1369 /* last put to release the srv structure */ 1370 put_device(&srv->dev); 1371 } 1372 1373 static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx, 1374 const uuid_t *paths_uuid, 1375 bool first_conn) 1376 { 1377 struct rtrs_srv_sess *srv; 1378 int i; 1379 1380 mutex_lock(&ctx->srv_mutex); 1381 list_for_each_entry(srv, &ctx->srv_list, ctx_list) { 1382 if (uuid_equal(&srv->paths_uuid, paths_uuid) && 1383 refcount_inc_not_zero(&srv->refcount)) { 1384 mutex_unlock(&ctx->srv_mutex); 1385 return srv; 1386 } 1387 } 1388 mutex_unlock(&ctx->srv_mutex); 1389 /* 1390 * If this request is not the first connection request from the 1391 * client for this session then fail and return error. 
 */
	if (!first_conn) {
		pr_err_ratelimited("Error: Not the first connection request for this session\n");
		return ERR_PTR(-ENXIO);
	}

	/* need to allocate a new srv */
	srv = kzalloc(sizeof(*srv), GFP_KERNEL);
	if (!srv)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&srv->paths_list);
	mutex_init(&srv->paths_mutex);
	mutex_init(&srv->paths_ev_mutex);
	uuid_copy(&srv->paths_uuid, paths_uuid);
	srv->queue_depth = sess_queue_depth;
	srv->ctx = ctx;
	device_initialize(&srv->dev);
	srv->dev.release = rtrs_srv_dev_release;

	srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
			      GFP_KERNEL);
	if (!srv->chunks)
		goto err_free_srv;

	for (i = 0; i < srv->queue_depth; i++) {
		srv->chunks[i] = alloc_pages(GFP_KERNEL,
					     get_order(max_chunk_size));
		if (!srv->chunks[i])
			goto err_free_chunks;
	}
	refcount_set(&srv->refcount, 1);
	mutex_lock(&ctx->srv_mutex);
	list_add(&srv->ctx_list, &ctx->srv_list);
	mutex_unlock(&ctx->srv_mutex);

	return srv;

err_free_chunks:
	while (i--)
		__free_pages(srv->chunks[i], get_order(max_chunk_size));
	kfree(srv->chunks);

err_free_srv:
	kfree(srv);
	return ERR_PTR(-ENOMEM);
}

static void put_srv(struct rtrs_srv_sess *srv)
{
	if (refcount_dec_and_test(&srv->refcount)) {
		struct rtrs_srv_ctx *ctx = srv->ctx;

		WARN_ON(srv->dev.kobj.state_in_sysfs);

		mutex_lock(&ctx->srv_mutex);
		list_del(&srv->ctx_list);
		mutex_unlock(&ctx->srv_mutex);
		free_srv(srv);
	}
}

static void __add_path_to_srv(struct rtrs_srv_sess *srv,
			      struct rtrs_srv_path *srv_path)
{
	list_add_tail(&srv_path->s.entry, &srv->paths_list);
	srv->paths_num++;
	WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
}

static void del_path_from_srv(struct rtrs_srv_path *srv_path)
{
	struct rtrs_srv_sess *srv = srv_path->srv;

	if (WARN_ON(!srv))
		return;

	mutex_lock(&srv->paths_mutex);
	list_del(&srv_path->s.entry);
	WARN_ON(!srv->paths_num);
	srv->paths_num--;
	mutex_unlock(&srv->paths_mutex);
}

/* return 0 if the addresses are the same, non-zero otherwise */
static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
{
	switch (a->sa_family) {
	case AF_IB:
		return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
			      &((struct sockaddr_ib *)b)->sib_addr,
			      sizeof(struct ib_addr)) &&
			(b->sa_family == AF_IB);
	case AF_INET:
		return memcmp(&((struct sockaddr_in *)a)->sin_addr,
			      &((struct sockaddr_in *)b)->sin_addr,
			      sizeof(struct in_addr)) &&
			(b->sa_family == AF_INET);
	case AF_INET6:
		return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
			      &((struct sockaddr_in6 *)b)->sin6_addr,
			      sizeof(struct in6_addr)) &&
			(b->sa_family == AF_INET6);
	default:
		return -ENOENT;
	}
}

static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv,
				    struct rdma_addr *addr)
{
	struct rtrs_srv_path *srv_path;

	list_for_each_entry(srv_path, &srv->paths_list, s.entry)
		if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr,
				  (struct sockaddr *)&addr->dst_addr) &&
		    !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr,
				  (struct sockaddr *)&addr->src_addr))
			return true;

	return false;
}

static void free_path(struct rtrs_srv_path *srv_path)
{
	if (srv_path->kobj.state_in_sysfs) {
		kobject_del(&srv_path->kobj);
		kobject_put(&srv_path->kobj);
	} else {
		free_percpu(srv_path->stats->rdma_stats);
		kfree(srv_path->stats);
		kfree(srv_path);
	}
}

static void rtrs_srv_close_work(struct work_struct *work)
{
	struct rtrs_srv_path *srv_path;
	struct rtrs_srv_con *con;
	int i;

	srv_path = container_of(work, typeof(*srv_path), close_work);

	rtrs_srv_stop_hb(srv_path);

	for (i = 0; i < srv_path->s.con_num; i++) {
		if (!srv_path->s.con[i])
			continue;
		con = to_srv_con(srv_path->s.con[i]);
		rdma_disconnect(con->c.cm_id);
		ib_drain_qp(con->c.qp);
	}

	/*
	 * Degrade ref count to the usual model with a single shared
	 * atomic_t counter
	 */
	percpu_ref_kill(&srv_path->ids_inflight_ref);

	/* Wait for all completion */
	wait_for_completion(&srv_path->complete_done);

	rtrs_srv_destroy_path_files(srv_path);

	/* Notify upper layer if we are the last path */
	rtrs_srv_path_down(srv_path);

	unmap_cont_bufs(srv_path);
	rtrs_srv_free_ops_ids(srv_path);

	for (i = 0; i < srv_path->s.con_num; i++) {
		if (!srv_path->s.con[i])
			continue;
		con = to_srv_con(srv_path->s.con[i]);
		rtrs_cq_qp_destroy(&con->c);
		rdma_destroy_id(con->c.cm_id);
		kfree(con);
	}
	rtrs_ib_dev_put(srv_path->s.dev);

	del_path_from_srv(srv_path);
	put_srv(srv_path->srv);
	srv_path->srv = NULL;
	rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);

	kfree(srv_path->dma_addr);
	kfree(srv_path->s.con);
	free_path(srv_path);
}

static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path,
			       struct rdma_cm_id *cm_id)
{
	struct rtrs_srv_sess *srv = srv_path->srv;
	struct rtrs_msg_conn_rsp msg;
	struct rdma_conn_param param;
	int err;

	param = (struct rdma_conn_param) {
		.rnr_retry_count = 7,
		.private_data = &msg,
		.private_data_len = sizeof(msg),
	};

	msg = (struct rtrs_msg_conn_rsp) {
		.magic = cpu_to_le16(RTRS_MAGIC),
		.version = cpu_to_le16(RTRS_PROTO_VER),
		.queue_depth = cpu_to_le16(srv->queue_depth),
		.max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
		.max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
	};

	if (always_invalidate)
		msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);

	err = rdma_accept(cm_id, &param);
	if (err)
		pr_err("rdma_accept(), err: %d\n", err);

	return err;
}

static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
{
	struct rtrs_msg_conn_rsp msg;
	int err;

	msg = (struct rtrs_msg_conn_rsp) {
		.magic = cpu_to_le16(RTRS_MAGIC),
		.version = cpu_to_le16(RTRS_PROTO_VER),
		.errno = cpu_to_le16(errno),
	};

	err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
	if (err)
		pr_err("rdma_reject(), err: %d\n", err);

	/* Bounce errno back */
	return errno;
}

static struct rtrs_srv_path *
__find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid)
{
	struct rtrs_srv_path *srv_path;

	list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
		if (uuid_equal(&srv_path->s.uuid, sess_uuid))
			return srv_path;
	}

	return NULL;
}

static int create_con(struct rtrs_srv_path *srv_path,
		      struct rdma_cm_id *cm_id,
		      unsigned int cid)
{
	struct rtrs_srv_sess *srv = srv_path->srv;
	struct rtrs_path *s = &srv_path->s;
	struct rtrs_srv_con *con;

	u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
	int err, cq_vector;

	con = kzalloc(sizeof(*con), GFP_KERNEL);
	if (!con) {
		err = -ENOMEM;
		goto err;
	}

	spin_lock_init(&con->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&con->rsp_wr_wait_list);
	con->c.cm_id = cm_id;
	con->c.path = &srv_path->s;
	con->c.cid = cid;
	atomic_set(&con->c.wr_cnt, 1);
	wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr;

	if (con->c.cid == 0) {
		/*
		 * All receive and all send (each requiring invalidate)
		 * + 2 for drain and heartbeat
		 */
		max_send_wr = min_t(int, wr_limit,
				    SERVICE_CON_QUEUE_DEPTH * 2 + 2);
		max_recv_wr = max_send_wr;
		s->signal_interval = min_not_zero(srv->queue_depth,
						  (size_t)SERVICE_CON_QUEUE_DEPTH);
	} else {
		/* when always_invalidate is enabled, we need linv + rinv + mr + imm */
		if (always_invalidate)
			max_send_wr =
				min_t(int, wr_limit,
				      srv->queue_depth * (1 + 4) + 1);
		else
			max_send_wr =
				min_t(int, wr_limit,
				      srv->queue_depth * (1 + 2) + 1);

		max_recv_wr = srv->queue_depth + 1;
	}
	cq_num = max_send_wr + max_recv_wr;
	atomic_set(&con->c.sq_wr_avail, max_send_wr);
	cq_vector = rtrs_srv_get_next_cq_vector(srv_path);

	/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
	err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num,
				max_send_wr, max_recv_wr,
				IB_POLL_WORKQUEUE);
	if (err) {
		rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
		goto free_con;
	}
	if (con->c.cid == 0) {
		err = post_recv_info_req(con);
		if (err)
			goto free_cqqp;
	}
	WARN_ON(srv_path->s.con[cid]);
	srv_path->s.con[cid] = &con->c;

	/*
	 * Change context from server to current connection. The other
	 * way is to use cm_id->qp->qp_context, which does not work on OFED.
1717 */ 1718 cm_id->context = &con->c; 1719 1720 return 0; 1721 1722 free_cqqp: 1723 rtrs_cq_qp_destroy(&con->c); 1724 free_con: 1725 kfree(con); 1726 1727 err: 1728 return err; 1729 } 1730 1731 static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv, 1732 struct rdma_cm_id *cm_id, 1733 unsigned int con_num, 1734 unsigned int recon_cnt, 1735 const uuid_t *uuid) 1736 { 1737 struct rtrs_srv_path *srv_path; 1738 int err = -ENOMEM; 1739 char str[NAME_MAX]; 1740 struct rtrs_addr path; 1741 1742 if (srv->paths_num >= MAX_PATHS_NUM) { 1743 err = -ECONNRESET; 1744 goto err; 1745 } 1746 if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) { 1747 err = -EEXIST; 1748 pr_err("Path with same addr exists\n"); 1749 goto err; 1750 } 1751 srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL); 1752 if (!srv_path) 1753 goto err; 1754 1755 srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL); 1756 if (!srv_path->stats) 1757 goto err_free_sess; 1758 1759 srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats); 1760 if (!srv_path->stats->rdma_stats) 1761 goto err_free_stats; 1762 1763 srv_path->stats->srv_path = srv_path; 1764 1765 srv_path->dma_addr = kcalloc(srv->queue_depth, 1766 sizeof(*srv_path->dma_addr), 1767 GFP_KERNEL); 1768 if (!srv_path->dma_addr) 1769 goto err_free_percpu; 1770 1771 srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con), 1772 GFP_KERNEL); 1773 if (!srv_path->s.con) 1774 goto err_free_dma_addr; 1775 1776 srv_path->state = RTRS_SRV_CONNECTING; 1777 srv_path->srv = srv; 1778 srv_path->cur_cq_vector = -1; 1779 srv_path->s.dst_addr = cm_id->route.addr.dst_addr; 1780 srv_path->s.src_addr = cm_id->route.addr.src_addr; 1781 1782 /* temporary until receiving session-name from client */ 1783 path.src = &srv_path->s.src_addr; 1784 path.dst = &srv_path->s.dst_addr; 1785 rtrs_addr_to_str(&path, str, sizeof(str)); 1786 strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname)); 1787 1788 srv_path->s.con_num = con_num; 1789 srv_path->s.irq_con_num = con_num; 1790 srv_path->s.recon_cnt = recon_cnt; 1791 uuid_copy(&srv_path->s.uuid, uuid); 1792 spin_lock_init(&srv_path->state_lock); 1793 INIT_WORK(&srv_path->close_work, rtrs_srv_close_work); 1794 rtrs_srv_init_hb(srv_path); 1795 1796 srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd); 1797 if (!srv_path->s.dev) { 1798 err = -ENOMEM; 1799 goto err_free_con; 1800 } 1801 err = map_cont_bufs(srv_path); 1802 if (err) 1803 goto err_put_dev; 1804 1805 err = rtrs_srv_alloc_ops_ids(srv_path); 1806 if (err) 1807 goto err_unmap_bufs; 1808 1809 __add_path_to_srv(srv, srv_path); 1810 1811 return srv_path; 1812 1813 err_unmap_bufs: 1814 unmap_cont_bufs(srv_path); 1815 err_put_dev: 1816 rtrs_ib_dev_put(srv_path->s.dev); 1817 err_free_con: 1818 kfree(srv_path->s.con); 1819 err_free_dma_addr: 1820 kfree(srv_path->dma_addr); 1821 err_free_percpu: 1822 free_percpu(srv_path->stats->rdma_stats); 1823 err_free_stats: 1824 kfree(srv_path->stats); 1825 err_free_sess: 1826 kfree(srv_path); 1827 err: 1828 return ERR_PTR(err); 1829 } 1830 1831 static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, 1832 const struct rtrs_msg_conn_req *msg, 1833 size_t len) 1834 { 1835 struct rtrs_srv_ctx *ctx = cm_id->context; 1836 struct rtrs_srv_path *srv_path; 1837 struct rtrs_srv_sess *srv; 1838 1839 u16 version, con_num, cid; 1840 u16 recon_cnt; 1841 int err = -ECONNRESET; 1842 1843 if (len < sizeof(*msg)) { 1844 pr_err("Invalid RTRS connection request\n"); 1845 goto reject_w_err; 1846 } 1847 if (le16_to_cpu(msg->magic) != 
RTRS_MAGIC) { 1848 pr_err("Invalid RTRS magic\n"); 1849 goto reject_w_err; 1850 } 1851 version = le16_to_cpu(msg->version); 1852 if (version >> 8 != RTRS_PROTO_VER_MAJOR) { 1853 pr_err("Unsupported major RTRS version: %d, expected %d\n", 1854 version >> 8, RTRS_PROTO_VER_MAJOR); 1855 goto reject_w_err; 1856 } 1857 con_num = le16_to_cpu(msg->cid_num); 1858 if (con_num > 4096) { 1859 /* Sanity check */ 1860 pr_err("Too many connections requested: %d\n", con_num); 1861 goto reject_w_err; 1862 } 1863 cid = le16_to_cpu(msg->cid); 1864 if (cid >= con_num) { 1865 /* Sanity check */ 1866 pr_err("Incorrect cid: %d >= %d\n", cid, con_num); 1867 goto reject_w_err; 1868 } 1869 recon_cnt = le16_to_cpu(msg->recon_cnt); 1870 srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn); 1871 if (IS_ERR(srv)) { 1872 err = PTR_ERR(srv); 1873 pr_err("get_or_create_srv(), error %d\n", err); 1874 goto reject_w_err; 1875 } 1876 mutex_lock(&srv->paths_mutex); 1877 srv_path = __find_path(srv, &msg->sess_uuid); 1878 if (srv_path) { 1879 struct rtrs_path *s = &srv_path->s; 1880 1881 /* Session already holds a reference */ 1882 put_srv(srv); 1883 1884 if (srv_path->state != RTRS_SRV_CONNECTING) { 1885 rtrs_err(s, "Session in wrong state: %s\n", 1886 rtrs_srv_state_str(srv_path->state)); 1887 mutex_unlock(&srv->paths_mutex); 1888 goto reject_w_err; 1889 } 1890 /* 1891 * Sanity checks 1892 */ 1893 if (con_num != s->con_num || cid >= s->con_num) { 1894 rtrs_err(s, "Incorrect request: %d, %d\n", 1895 cid, con_num); 1896 mutex_unlock(&srv->paths_mutex); 1897 goto reject_w_err; 1898 } 1899 if (s->con[cid]) { 1900 rtrs_err(s, "Connection already exists: %d\n", 1901 cid); 1902 mutex_unlock(&srv->paths_mutex); 1903 goto reject_w_err; 1904 } 1905 } else { 1906 srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt, 1907 &msg->sess_uuid); 1908 if (IS_ERR(srv_path)) { 1909 mutex_unlock(&srv->paths_mutex); 1910 put_srv(srv); 1911 err = PTR_ERR(srv_path); 1912 pr_err("RTRS server session allocation failed: %d\n", err); 1913 goto reject_w_err; 1914 } 1915 } 1916 err = create_con(srv_path, cm_id, cid); 1917 if (err) { 1918 rtrs_err((&srv_path->s), "create_con(), error %d\n", err); 1919 rtrs_rdma_do_reject(cm_id, err); 1920 /* 1921 * Since session has other connections we follow normal way 1922 * through workqueue, but still return an error to tell cma.c 1923 * to call rdma_destroy_id() for current connection. 1924 */ 1925 goto close_and_return_err; 1926 } 1927 err = rtrs_rdma_do_accept(srv_path, cm_id); 1928 if (err) { 1929 rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err); 1930 rtrs_rdma_do_reject(cm_id, err); 1931 /* 1932 * Since current connection was successfully added to the 1933 * session we follow normal way through workqueue to close the 1934 * session, thus return 0 to tell cma.c we call 1935 * rdma_destroy_id() ourselves. 
1936 */ 1937 err = 0; 1938 goto close_and_return_err; 1939 } 1940 mutex_unlock(&srv->paths_mutex); 1941 1942 return 0; 1943 1944 reject_w_err: 1945 return rtrs_rdma_do_reject(cm_id, err); 1946 1947 close_and_return_err: 1948 mutex_unlock(&srv->paths_mutex); 1949 close_path(srv_path); 1950 1951 return err; 1952 } 1953 1954 static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id, 1955 struct rdma_cm_event *ev) 1956 { 1957 struct rtrs_srv_path *srv_path = NULL; 1958 struct rtrs_path *s = NULL; 1959 struct rtrs_con *c = NULL; 1960 1961 if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST) 1962 /* 1963 * In case of error cma.c will destroy cm_id, 1964 * see cma_process_remove() 1965 */ 1966 return rtrs_rdma_connect(cm_id, ev->param.conn.private_data, 1967 ev->param.conn.private_data_len); 1968 1969 c = cm_id->context; 1970 s = c->path; 1971 srv_path = to_srv_path(s); 1972 1973 switch (ev->event) { 1974 case RDMA_CM_EVENT_ESTABLISHED: 1975 /* Nothing here */ 1976 break; 1977 case RDMA_CM_EVENT_REJECTED: 1978 case RDMA_CM_EVENT_CONNECT_ERROR: 1979 case RDMA_CM_EVENT_UNREACHABLE: 1980 rtrs_err(s, "CM error (CM event: %s, err: %d)\n", 1981 rdma_event_msg(ev->event), ev->status); 1982 fallthrough; 1983 case RDMA_CM_EVENT_DISCONNECTED: 1984 case RDMA_CM_EVENT_ADDR_CHANGE: 1985 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 1986 case RDMA_CM_EVENT_DEVICE_REMOVAL: 1987 close_path(srv_path); 1988 break; 1989 default: 1990 pr_err("Ignoring unexpected CM event %s, err %d\n", 1991 rdma_event_msg(ev->event), ev->status); 1992 break; 1993 } 1994 1995 return 0; 1996 } 1997 1998 static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx, 1999 struct sockaddr *addr, 2000 enum rdma_ucm_port_space ps) 2001 { 2002 struct rdma_cm_id *cm_id; 2003 int ret; 2004 2005 cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler, 2006 ctx, ps, IB_QPT_RC); 2007 if (IS_ERR(cm_id)) { 2008 ret = PTR_ERR(cm_id); 2009 pr_err("Creating id for RDMA connection failed, err: %d\n", 2010 ret); 2011 goto err_out; 2012 } 2013 ret = rdma_bind_addr(cm_id, addr); 2014 if (ret) { 2015 pr_err("Binding RDMA address failed, err: %d\n", ret); 2016 goto err_cm; 2017 } 2018 ret = rdma_listen(cm_id, 64); 2019 if (ret) { 2020 pr_err("Listening on RDMA connection failed, err: %d\n", 2021 ret); 2022 goto err_cm; 2023 } 2024 2025 return cm_id; 2026 2027 err_cm: 2028 rdma_destroy_id(cm_id); 2029 err_out: 2030 2031 return ERR_PTR(ret); 2032 } 2033 2034 static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port) 2035 { 2036 struct sockaddr_in6 sin = { 2037 .sin6_family = AF_INET6, 2038 .sin6_addr = IN6ADDR_ANY_INIT, 2039 .sin6_port = htons(port), 2040 }; 2041 struct sockaddr_ib sib = { 2042 .sib_family = AF_IB, 2043 .sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port), 2044 .sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL), 2045 .sib_pkey = cpu_to_be16(0xffff), 2046 }; 2047 struct rdma_cm_id *cm_ip, *cm_ib; 2048 int ret; 2049 2050 /* 2051 * We accept both IPoIB and IB connections, so we need to keep 2052 * two cm id's, one for each socket type and port space. 2053 * If the cm initialization of one of the id's fails, we abort 2054 * everything. 

static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
					   struct sockaddr *addr,
					   enum rdma_ucm_port_space ps)
{
	struct rdma_cm_id *cm_id;
	int ret;

	cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
			       ctx, ps, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		pr_err("Creating id for RDMA connection failed, err: %d\n",
		       ret);
		goto err_out;
	}
	ret = rdma_bind_addr(cm_id, addr);
	if (ret) {
		pr_err("Binding RDMA address failed, err: %d\n", ret);
		goto err_cm;
	}
	ret = rdma_listen(cm_id, 64);
	if (ret) {
		pr_err("Listening on RDMA connection failed, err: %d\n",
		       ret);
		goto err_cm;
	}

	return cm_id;

err_cm:
	rdma_destroy_id(cm_id);
err_out:

	return ERR_PTR(ret);
}

static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
{
	struct sockaddr_in6 sin = {
		.sin6_family	= AF_INET6,
		.sin6_addr	= IN6ADDR_ANY_INIT,
		.sin6_port	= htons(port),
	};
	struct sockaddr_ib sib = {
		.sib_family	= AF_IB,
		.sib_sid	= cpu_to_be64(RDMA_IB_IP_PS_IB | port),
		.sib_sid_mask	= cpu_to_be64(0xffffffffffffffffULL),
		.sib_pkey	= cpu_to_be16(0xffff),
	};
	struct rdma_cm_id *cm_ip, *cm_ib;
	int ret;

	/*
	 * We accept both IPoIB and IB connections, so we need to keep
	 * two cm id's, one for each socket type and port space.
	 * If the cm initialization of one of the id's fails, we abort
	 * everything.
	 */
	cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
	if (IS_ERR(cm_ip))
		return PTR_ERR(cm_ip);

	cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
	if (IS_ERR(cm_ib)) {
		ret = PTR_ERR(cm_ib);
		goto free_cm_ip;
	}

	ctx->cm_id_ip = cm_ip;
	ctx->cm_id_ib = cm_ib;

	return 0;

free_cm_ip:
	rdma_destroy_id(cm_ip);

	return ret;
}

static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
{
	struct rtrs_srv_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->ops = *ops;
	mutex_init(&ctx->srv_mutex);
	INIT_LIST_HEAD(&ctx->srv_list);

	return ctx;
}

static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
{
	WARN_ON(!list_empty(&ctx->srv_list));
	mutex_destroy(&ctx->srv_mutex);
	kfree(ctx);
}

static int rtrs_srv_add_one(struct ib_device *device)
{
	struct rtrs_srv_ctx *ctx;
	int ret = 0;

	mutex_lock(&ib_ctx.ib_dev_mutex);
	if (ib_ctx.ib_dev_count)
		goto out;

	/*
	 * Since our CM IDs are NOT bound to any ib device, we create them
	 * only once.
	 */
	ctx = ib_ctx.srv_ctx;
	ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
	if (ret) {
		/*
		 * We errored out here. According to the ib code, if we
		 * encounter an error here, the error code is ignored and no
		 * more calls to our ops are made.
		 */
		pr_err("Failed to initialize RDMA connection\n");
		goto err_out;
	}

out:
	/*
	 * Keep track of the number of ib devices added.
	 */
	ib_ctx.ib_dev_count++;

err_out:
	mutex_unlock(&ib_ctx.ib_dev_mutex);
	return ret;
}

static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
{
	struct rtrs_srv_ctx *ctx;

	mutex_lock(&ib_ctx.ib_dev_mutex);
	ib_ctx.ib_dev_count--;

	if (ib_ctx.ib_dev_count)
		goto out;

	/*
	 * Since our CM IDs are NOT bound to any ib device, we remove them
	 * only once, when the last device is removed.
	 */
	ctx = ib_ctx.srv_ctx;
	rdma_destroy_id(ctx->cm_id_ip);
	rdma_destroy_id(ctx->cm_id_ib);

out:
	mutex_unlock(&ib_ctx.ib_dev_mutex);
}

static struct ib_client rtrs_srv_client = {
	.name	= "rtrs_server",
	.add	= rtrs_srv_add_one,
	.remove	= rtrs_srv_remove_one
};
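
/*
 * A minimal usage sketch for the exported API below, assuming the
 * rdma_ev/link_ev callbacks declared for struct rtrs_srv_ops in rtrs.h;
 * the example_* names are illustrative only:
 *
 *	static struct rtrs_srv_ops example_ops = {
 *		.rdma_ev = example_rdma_ev,
 *		.link_ev = example_link_ev,
 *	};
 *	struct rtrs_srv_ctx *ctx;
 *
 *	ctx = rtrs_srv_open(&example_ops, example_port);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	rtrs_srv_close(ctx);
 */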

/**
 * rtrs_srv_open() - open RTRS server context
 * @ops: callback functions
 * @port: port to listen on
 *
 * Creates a server context with the specified callbacks.
 *
 * Return: a valid pointer on success, otherwise an ERR_PTR.
 */
struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
{
	struct rtrs_srv_ctx *ctx;
	int err;

	ctx = alloc_srv_ctx(ops);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	mutex_init(&ib_ctx.ib_dev_mutex);
	ib_ctx.srv_ctx = ctx;
	ib_ctx.port = port;

	err = ib_register_client(&rtrs_srv_client);
	if (err) {
		free_srv_ctx(ctx);
		return ERR_PTR(err);
	}

	return ctx;
}
EXPORT_SYMBOL(rtrs_srv_open);

static void close_paths(struct rtrs_srv_sess *srv)
{
	struct rtrs_srv_path *srv_path;

	mutex_lock(&srv->paths_mutex);
	list_for_each_entry(srv_path, &srv->paths_list, s.entry)
		close_path(srv_path);
	mutex_unlock(&srv->paths_mutex);
}

static void close_ctx(struct rtrs_srv_ctx *ctx)
{
	struct rtrs_srv_sess *srv;

	mutex_lock(&ctx->srv_mutex);
	list_for_each_entry(srv, &ctx->srv_list, ctx_list)
		close_paths(srv);
	mutex_unlock(&ctx->srv_mutex);
	flush_workqueue(rtrs_wq);
}

/**
 * rtrs_srv_close() - close RTRS server context
 * @ctx: pointer to server context
 *
 * Closes the RTRS server context together with all client sessions.
 */
void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
{
	ib_unregister_client(&rtrs_srv_client);
	mutex_destroy(&ib_ctx.ib_dev_mutex);
	close_ctx(ctx);
	free_srv_ctx(ctx);
}
EXPORT_SYMBOL(rtrs_srv_close);

static int check_module_params(void)
{
	if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
		pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
		       sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
		return -EINVAL;
	}
	if (max_chunk_size < MIN_CHUNK_SIZE || !is_power_of_2(max_chunk_size)) {
		pr_err("Invalid max_chunk_size value %d, has to be >= %d and a power of two.\n",
		       max_chunk_size, MIN_CHUNK_SIZE);
		return -EINVAL;
	}

	/*
	 * Check if the IB immediate data size is enough to hold the mem_id
	 * and the offset inside the memory chunk.
	 */
	if ((ilog2(sess_queue_depth - 1) + 1) +
	    (ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
		pr_err("RDMA immediate size (%db) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
		       MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
		return -EINVAL;
	}

	return 0;
}

static int __init rtrs_server_init(void)
{
	int err;

	pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld), sess_queue_depth: %d, always_invalidate: %d)\n",
		KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
		max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
		sess_queue_depth, always_invalidate);

	rtrs_rdma_dev_pd_init(0, &dev_pd);

	err = check_module_params();
	if (err) {
		pr_err("Failed to load module, invalid module parameters, err: %d\n",
		       err);
		return err;
	}
	err = class_register(&rtrs_dev_class);
	if (err)
		goto out_err;

	rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
	if (!rtrs_wq) {
		err = -ENOMEM;
		goto out_dev_class;
	}

	return 0;

out_dev_class:
	class_unregister(&rtrs_dev_class);
out_err:
	return err;
}

static void __exit rtrs_server_exit(void)
{
	destroy_workqueue(rtrs_wq);
	class_unregister(&rtrs_dev_class);
	rtrs_rdma_dev_pd_deinit(&dev_pd);
}

module_init(rtrs_server_init);
module_exit(rtrs_server_exit);
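
/*
 * The parameter check below guards the immediate-data encoding: the
 * immediate value of an RDMA-write-with-immediate must hold both the
 * memory-chunk id and the offset inside the chunk, which takes
 * ilog2(sess_queue_depth - 1) + 1 bits plus ilog2(max_chunk_size - 1) + 1
 * bits. As a worked example with the module defaults, 512 buffers need
 * 9 bits and 128 KiB chunks need 17 bits, 26 bits in total, which must
 * not exceed MAX_IMM_PAYL_BITS.
 */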