// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2017, Microsoft Corporation.
 *   Copyright (C) 2018, LG Electronics.
 *
 *   Author(s): Long Li <longli@microsoft.com>,
 *		Hyunchul Lee <hyc.lee@gmail.com>
 */

#define SUBMOD_NAME	"smb_direct"

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string_choices.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include "glob.h"
#include "connection.h"
#include "smb_common.h"
#include "../common/smb2status.h"
#include "transport_rdma.h"

#define SMB_DIRECT_PORT_IWARP		5445
#define SMB_DIRECT_PORT_INFINIBAND	445

#define SMB_DIRECT_VERSION_LE		cpu_to_le16(0x0100)

/* SMB_DIRECT negotiation timeout in seconds */
#define SMB_DIRECT_NEGOTIATE_TIMEOUT	120

#define SMB_DIRECT_MAX_SEND_SGES	6
#define SMB_DIRECT_MAX_RECV_SGES	1

/*
 * Default maximum number of RDMA read/write operations outstanding on this
 * connection. This value may be decreased during QP creation due to hardware
 * limits.
 */
#define SMB_DIRECT_CM_INITIATOR_DEPTH	8

/* Maximum number of retries on data transfer operations */
#define SMB_DIRECT_CM_RETRY		6
/* No need to retry on Receiver Not Ready since SMB_DIRECT manages credits */
#define SMB_DIRECT_CM_RNR_RETRY		0

/*
 * User configurable initial values per SMB_DIRECT transport connection
 * as defined in [MS-SMBD] 3.1.1.1.
 * These may change after SMB_DIRECT negotiation.
 */

/* Use port 445 as the SMB Direct port by default */
static int smb_direct_port = SMB_DIRECT_PORT_INFINIBAND;

/* The local peer's maximum number of credits to grant to the peer */
static int smb_direct_receive_credit_max = 255;

/* The remote peer's credit request of local peer */
static int smb_direct_send_credit_target = 255;

/* The maximum single-message size that can be sent to the remote peer */
static int smb_direct_max_send_size = 1364;

/* The maximum fragmented upper-layer payload receive size supported */
static int smb_direct_max_fragmented_recv_size = 1024 * 1024;

/* The maximum single-message size which can be received */
static int smb_direct_max_receive_size = 1364;

static int smb_direct_max_read_write_size = SMBD_DEFAULT_IOSIZE;

static LIST_HEAD(smb_direct_device_list);
static DEFINE_RWLOCK(smb_direct_device_lock);

struct smb_direct_device {
	struct ib_device *ib_dev;
	struct list_head list;
};

static struct smb_direct_listener {
	struct rdma_cm_id *cm_id;
} smb_direct_listener;

static struct workqueue_struct *smb_direct_wq;

enum smb_direct_status {
	SMB_DIRECT_CS_NEW = 0,
	SMB_DIRECT_CS_CONNECTED,
	SMB_DIRECT_CS_DISCONNECTING,
	SMB_DIRECT_CS_DISCONNECTED,
};

struct smb_direct_transport {
	struct ksmbd_transport transport;

	enum smb_direct_status status;
	bool full_packet_received;
	wait_queue_head_t wait_status;

	struct rdma_cm_id *cm_id;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_pd *pd;
	struct ib_qp *qp;

	int max_send_size;
	int max_recv_size;
	int max_fragmented_send_size;
	int max_fragmented_recv_size;
	int max_rdma_rw_size;

	spinlock_t reassembly_queue_lock;
	struct list_head reassembly_queue;
	int reassembly_data_length;
	int reassembly_queue_length;
	int first_entry_offset;
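	/*
	 * The reassembly bookkeeping above is updated under
	 * reassembly_queue_lock; the reader checks reassembly_data_length
	 * without the lock, paired with the virt_wmb()/virt_rmb() barriers
	 * in enqueue_reassembly() and smb_direct_read().
	 */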
wait_queue_head_t wait_reassembly_queue; 122 123 spinlock_t receive_credit_lock; 124 int recv_credits; 125 int count_avail_recvmsg; 126 int recv_credit_max; 127 int recv_credit_target; 128 129 spinlock_t recvmsg_queue_lock; 130 struct list_head recvmsg_queue; 131 132 int send_credit_target; 133 atomic_t send_credits; 134 spinlock_t lock_new_recv_credits; 135 int new_recv_credits; 136 int max_rw_credits; 137 int pages_per_rw_credit; 138 atomic_t rw_credits; 139 140 wait_queue_head_t wait_send_credits; 141 wait_queue_head_t wait_rw_credits; 142 143 mempool_t *sendmsg_mempool; 144 struct kmem_cache *sendmsg_cache; 145 mempool_t *recvmsg_mempool; 146 struct kmem_cache *recvmsg_cache; 147 148 wait_queue_head_t wait_send_pending; 149 atomic_t send_pending; 150 151 struct work_struct post_recv_credits_work; 152 struct work_struct send_immediate_work; 153 struct work_struct disconnect_work; 154 155 bool negotiation_requested; 156 }; 157 158 #define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport)) 159 #define SMBD_TRANS(t) ((struct smb_direct_transport *)container_of(t, \ 160 struct smb_direct_transport, transport)) 161 enum { 162 SMB_DIRECT_MSG_NEGOTIATE_REQ = 0, 163 SMB_DIRECT_MSG_DATA_TRANSFER 164 }; 165 166 static const struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops; 167 168 struct smb_direct_send_ctx { 169 struct list_head msg_list; 170 int wr_cnt; 171 bool need_invalidate_rkey; 172 unsigned int remote_key; 173 }; 174 175 struct smb_direct_sendmsg { 176 struct smb_direct_transport *transport; 177 struct ib_send_wr wr; 178 struct list_head list; 179 int num_sge; 180 struct ib_sge sge[SMB_DIRECT_MAX_SEND_SGES]; 181 struct ib_cqe cqe; 182 u8 packet[]; 183 }; 184 185 struct smb_direct_recvmsg { 186 struct smb_direct_transport *transport; 187 struct list_head list; 188 int type; 189 struct ib_sge sge; 190 struct ib_cqe cqe; 191 bool first_segment; 192 u8 packet[]; 193 }; 194 195 struct smb_direct_rdma_rw_msg { 196 struct smb_direct_transport *t; 197 struct ib_cqe cqe; 198 int status; 199 struct completion *completion; 200 struct list_head list; 201 struct rdma_rw_ctx rw_ctx; 202 struct sg_table sgt; 203 struct scatterlist sg_list[]; 204 }; 205 206 void init_smbd_max_io_size(unsigned int sz) 207 { 208 sz = clamp_val(sz, SMBD_MIN_IOSIZE, SMBD_MAX_IOSIZE); 209 smb_direct_max_read_write_size = sz; 210 } 211 212 unsigned int get_smbd_max_read_write_size(void) 213 { 214 return smb_direct_max_read_write_size; 215 } 216 217 static inline int get_buf_page_count(void *buf, int size) 218 { 219 return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) - 220 (uintptr_t)buf / PAGE_SIZE; 221 } 222 223 static void smb_direct_destroy_pools(struct smb_direct_transport *transport); 224 static void smb_direct_post_recv_credits(struct work_struct *work); 225 static int smb_direct_post_send_data(struct smb_direct_transport *t, 226 struct smb_direct_send_ctx *send_ctx, 227 struct kvec *iov, int niov, 228 int remaining_data_length); 229 230 static inline struct smb_direct_transport * 231 smb_trans_direct_transfort(struct ksmbd_transport *t) 232 { 233 return container_of(t, struct smb_direct_transport, transport); 234 } 235 236 static inline void 237 *smb_direct_recvmsg_payload(struct smb_direct_recvmsg *recvmsg) 238 { 239 return (void *)recvmsg->packet; 240 } 241 242 static inline bool is_receive_credit_post_required(int receive_credits, 243 int avail_recvmsg_count) 244 { 245 return receive_credits <= (smb_direct_receive_credit_max >> 3) && 246 avail_recvmsg_count >= (receive_credits >> 2); 247 } 248 
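/*
 * Worked example for is_receive_credit_post_required() above, assuming the
 * default smb_direct_receive_credit_max of 255: a refill of receive buffers
 * is triggered once the currently granted receive credits drop to 31
 * (255 >> 3) or fewer, provided at least receive_credits / 4 preallocated
 * recvmsg buffers are free to repost.
 */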
249 static struct 250 smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t) 251 { 252 struct smb_direct_recvmsg *recvmsg = NULL; 253 254 spin_lock(&t->recvmsg_queue_lock); 255 if (!list_empty(&t->recvmsg_queue)) { 256 recvmsg = list_first_entry(&t->recvmsg_queue, 257 struct smb_direct_recvmsg, 258 list); 259 list_del(&recvmsg->list); 260 } 261 spin_unlock(&t->recvmsg_queue_lock); 262 return recvmsg; 263 } 264 265 static void put_recvmsg(struct smb_direct_transport *t, 266 struct smb_direct_recvmsg *recvmsg) 267 { 268 if (likely(recvmsg->sge.length != 0)) { 269 ib_dma_unmap_single(t->cm_id->device, 270 recvmsg->sge.addr, 271 recvmsg->sge.length, 272 DMA_FROM_DEVICE); 273 recvmsg->sge.length = 0; 274 } 275 276 spin_lock(&t->recvmsg_queue_lock); 277 list_add(&recvmsg->list, &t->recvmsg_queue); 278 spin_unlock(&t->recvmsg_queue_lock); 279 } 280 281 static void enqueue_reassembly(struct smb_direct_transport *t, 282 struct smb_direct_recvmsg *recvmsg, 283 int data_length) 284 { 285 spin_lock(&t->reassembly_queue_lock); 286 list_add_tail(&recvmsg->list, &t->reassembly_queue); 287 t->reassembly_queue_length++; 288 /* 289 * Make sure reassembly_data_length is updated after list and 290 * reassembly_queue_length are updated. On the dequeue side 291 * reassembly_data_length is checked without a lock to determine 292 * if reassembly_queue_length and list is up to date 293 */ 294 virt_wmb(); 295 t->reassembly_data_length += data_length; 296 spin_unlock(&t->reassembly_queue_lock); 297 } 298 299 static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t) 300 { 301 if (!list_empty(&t->reassembly_queue)) 302 return list_first_entry(&t->reassembly_queue, 303 struct smb_direct_recvmsg, list); 304 else 305 return NULL; 306 } 307 308 static void smb_direct_disconnect_rdma_work(struct work_struct *work) 309 { 310 struct smb_direct_transport *t = 311 container_of(work, struct smb_direct_transport, 312 disconnect_work); 313 314 if (t->status == SMB_DIRECT_CS_CONNECTED) { 315 t->status = SMB_DIRECT_CS_DISCONNECTING; 316 rdma_disconnect(t->cm_id); 317 } 318 } 319 320 static void 321 smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t) 322 { 323 if (t->status == SMB_DIRECT_CS_CONNECTED) 324 queue_work(smb_direct_wq, &t->disconnect_work); 325 } 326 327 static void smb_direct_send_immediate_work(struct work_struct *work) 328 { 329 struct smb_direct_transport *t = container_of(work, 330 struct smb_direct_transport, send_immediate_work); 331 332 if (t->status != SMB_DIRECT_CS_CONNECTED) 333 return; 334 335 smb_direct_post_send_data(t, NULL, NULL, 0, 0); 336 } 337 338 static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id) 339 { 340 struct smb_direct_transport *t; 341 struct ksmbd_conn *conn; 342 343 t = kzalloc(sizeof(*t), KSMBD_DEFAULT_GFP); 344 if (!t) 345 return NULL; 346 347 t->cm_id = cm_id; 348 cm_id->context = t; 349 350 t->status = SMB_DIRECT_CS_NEW; 351 init_waitqueue_head(&t->wait_status); 352 353 spin_lock_init(&t->reassembly_queue_lock); 354 INIT_LIST_HEAD(&t->reassembly_queue); 355 t->reassembly_data_length = 0; 356 t->reassembly_queue_length = 0; 357 init_waitqueue_head(&t->wait_reassembly_queue); 358 init_waitqueue_head(&t->wait_send_credits); 359 init_waitqueue_head(&t->wait_rw_credits); 360 361 spin_lock_init(&t->receive_credit_lock); 362 spin_lock_init(&t->recvmsg_queue_lock); 363 INIT_LIST_HEAD(&t->recvmsg_queue); 364 365 init_waitqueue_head(&t->wait_send_pending); 366 atomic_set(&t->send_pending, 0); 367 368 
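	/*
	 * new_recv_credits accumulates receive buffers freshly posted by
	 * smb_direct_post_recv_credits() and is drained by
	 * manage_credits_prior_sending() to fill credits_granted in the
	 * next outgoing SMB Direct header.
	 */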
spin_lock_init(&t->lock_new_recv_credits); 369 370 INIT_WORK(&t->post_recv_credits_work, 371 smb_direct_post_recv_credits); 372 INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work); 373 INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work); 374 375 conn = ksmbd_conn_alloc(); 376 if (!conn) 377 goto err; 378 conn->transport = KSMBD_TRANS(t); 379 KSMBD_TRANS(t)->conn = conn; 380 KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops; 381 return t; 382 err: 383 kfree(t); 384 return NULL; 385 } 386 387 static void smb_direct_free_transport(struct ksmbd_transport *kt) 388 { 389 kfree(SMBD_TRANS(kt)); 390 } 391 392 static void free_transport(struct smb_direct_transport *t) 393 { 394 struct smb_direct_recvmsg *recvmsg; 395 396 wake_up_interruptible(&t->wait_send_credits); 397 398 ksmbd_debug(RDMA, "wait for all send posted to IB to finish\n"); 399 wait_event(t->wait_send_pending, 400 atomic_read(&t->send_pending) == 0); 401 402 disable_work_sync(&t->disconnect_work); 403 disable_work_sync(&t->post_recv_credits_work); 404 disable_work_sync(&t->send_immediate_work); 405 406 if (t->qp) { 407 ib_drain_qp(t->qp); 408 ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs); 409 t->qp = NULL; 410 rdma_destroy_qp(t->cm_id); 411 } 412 413 ksmbd_debug(RDMA, "drain the reassembly queue\n"); 414 do { 415 spin_lock(&t->reassembly_queue_lock); 416 recvmsg = get_first_reassembly(t); 417 if (recvmsg) { 418 list_del(&recvmsg->list); 419 spin_unlock(&t->reassembly_queue_lock); 420 put_recvmsg(t, recvmsg); 421 } else { 422 spin_unlock(&t->reassembly_queue_lock); 423 } 424 } while (recvmsg); 425 t->reassembly_data_length = 0; 426 427 if (t->send_cq) 428 ib_free_cq(t->send_cq); 429 if (t->recv_cq) 430 ib_free_cq(t->recv_cq); 431 if (t->pd) 432 ib_dealloc_pd(t->pd); 433 if (t->cm_id) 434 rdma_destroy_id(t->cm_id); 435 436 smb_direct_destroy_pools(t); 437 ksmbd_conn_free(KSMBD_TRANS(t)->conn); 438 } 439 440 static struct smb_direct_sendmsg 441 *smb_direct_alloc_sendmsg(struct smb_direct_transport *t) 442 { 443 struct smb_direct_sendmsg *msg; 444 445 msg = mempool_alloc(t->sendmsg_mempool, KSMBD_DEFAULT_GFP); 446 if (!msg) 447 return ERR_PTR(-ENOMEM); 448 msg->transport = t; 449 INIT_LIST_HEAD(&msg->list); 450 msg->num_sge = 0; 451 return msg; 452 } 453 454 static void smb_direct_free_sendmsg(struct smb_direct_transport *t, 455 struct smb_direct_sendmsg *msg) 456 { 457 int i; 458 459 if (msg->num_sge > 0) { 460 ib_dma_unmap_single(t->cm_id->device, 461 msg->sge[0].addr, msg->sge[0].length, 462 DMA_TO_DEVICE); 463 for (i = 1; i < msg->num_sge; i++) 464 ib_dma_unmap_page(t->cm_id->device, 465 msg->sge[i].addr, msg->sge[i].length, 466 DMA_TO_DEVICE); 467 } 468 mempool_free(msg, t->sendmsg_mempool); 469 } 470 471 static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg) 472 { 473 switch (recvmsg->type) { 474 case SMB_DIRECT_MSG_DATA_TRANSFER: { 475 struct smb_direct_data_transfer *req = 476 (struct smb_direct_data_transfer *)recvmsg->packet; 477 struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet 478 + le32_to_cpu(req->data_offset)); 479 ksmbd_debug(RDMA, 480 "CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n", 481 le16_to_cpu(req->credits_granted), 482 le16_to_cpu(req->credits_requested), 483 req->data_length, req->remaining_data_length, 484 hdr->ProtocolId, hdr->Command); 485 break; 486 } 487 case SMB_DIRECT_MSG_NEGOTIATE_REQ: { 488 struct smb_direct_negotiate_req *req = 489 (struct smb_direct_negotiate_req *)recvmsg->packet; 490 
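		/*
		 * Log the peer's offer, then sanity-check it below: protocol
		 * version 0x0100 must fall within [min_version, max_version]
		 * and the advertised credits and sizes must be large enough
		 * to be usable.
		 */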
ksmbd_debug(RDMA, 491 "MinVersion: %u, MaxVersion: %u, CreditRequested: %u, MaxSendSize: %u, MaxRecvSize: %u, MaxFragmentedSize: %u\n", 492 le16_to_cpu(req->min_version), 493 le16_to_cpu(req->max_version), 494 le16_to_cpu(req->credits_requested), 495 le32_to_cpu(req->preferred_send_size), 496 le32_to_cpu(req->max_receive_size), 497 le32_to_cpu(req->max_fragmented_size)); 498 if (le16_to_cpu(req->min_version) > 0x0100 || 499 le16_to_cpu(req->max_version) < 0x0100) 500 return -EOPNOTSUPP; 501 if (le16_to_cpu(req->credits_requested) <= 0 || 502 le32_to_cpu(req->max_receive_size) <= 128 || 503 le32_to_cpu(req->max_fragmented_size) <= 504 128 * 1024) 505 return -ECONNABORTED; 506 507 break; 508 } 509 default: 510 return -EINVAL; 511 } 512 return 0; 513 } 514 515 static void recv_done(struct ib_cq *cq, struct ib_wc *wc) 516 { 517 struct smb_direct_recvmsg *recvmsg; 518 struct smb_direct_transport *t; 519 520 recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe); 521 t = recvmsg->transport; 522 523 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) { 524 put_recvmsg(t, recvmsg); 525 if (wc->status != IB_WC_WR_FLUSH_ERR) { 526 pr_err("Recv error. status='%s (%d)' opcode=%d\n", 527 ib_wc_status_msg(wc->status), wc->status, 528 wc->opcode); 529 smb_direct_disconnect_rdma_connection(t); 530 } 531 return; 532 } 533 534 ksmbd_debug(RDMA, "Recv completed. status='%s (%d)', opcode=%d\n", 535 ib_wc_status_msg(wc->status), wc->status, 536 wc->opcode); 537 538 ib_dma_sync_single_for_cpu(wc->qp->device, recvmsg->sge.addr, 539 recvmsg->sge.length, DMA_FROM_DEVICE); 540 541 switch (recvmsg->type) { 542 case SMB_DIRECT_MSG_NEGOTIATE_REQ: 543 if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) { 544 put_recvmsg(t, recvmsg); 545 smb_direct_disconnect_rdma_connection(t); 546 return; 547 } 548 t->negotiation_requested = true; 549 t->full_packet_received = true; 550 t->status = SMB_DIRECT_CS_CONNECTED; 551 enqueue_reassembly(t, recvmsg, 0); 552 wake_up_interruptible(&t->wait_status); 553 return; 554 case SMB_DIRECT_MSG_DATA_TRANSFER: { 555 struct smb_direct_data_transfer *data_transfer = 556 (struct smb_direct_data_transfer *)recvmsg->packet; 557 u32 remaining_data_length, data_offset, data_length; 558 int avail_recvmsg_count, receive_credits; 559 560 if (wc->byte_len < 561 offsetof(struct smb_direct_data_transfer, padding)) { 562 put_recvmsg(t, recvmsg); 563 smb_direct_disconnect_rdma_connection(t); 564 return; 565 } 566 567 remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length); 568 data_length = le32_to_cpu(data_transfer->data_length); 569 data_offset = le32_to_cpu(data_transfer->data_offset); 570 if (wc->byte_len < data_offset || 571 wc->byte_len < (u64)data_offset + data_length) { 572 put_recvmsg(t, recvmsg); 573 smb_direct_disconnect_rdma_connection(t); 574 return; 575 } 576 if (remaining_data_length > t->max_fragmented_recv_size || 577 data_length > t->max_fragmented_recv_size || 578 (u64)remaining_data_length + (u64)data_length > 579 (u64)t->max_fragmented_recv_size) { 580 put_recvmsg(t, recvmsg); 581 smb_direct_disconnect_rdma_connection(t); 582 return; 583 } 584 585 if (data_length) { 586 if (t->full_packet_received) 587 recvmsg->first_segment = true; 588 589 if (le32_to_cpu(data_transfer->remaining_data_length)) 590 t->full_packet_received = false; 591 else 592 t->full_packet_received = true; 593 594 spin_lock(&t->receive_credit_lock); 595 receive_credits = --(t->recv_credits); 596 avail_recvmsg_count = t->count_avail_recvmsg; 597 
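			/*
			 * A data-carrying receive consumes a credit but keeps
			 * its buffer: it stays on the reassembly queue until
			 * smb_direct_read() returns it via put_recvmsg(), so
			 * count_avail_recvmsg is not incremented here.
			 */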
spin_unlock(&t->receive_credit_lock); 598 } else { 599 spin_lock(&t->receive_credit_lock); 600 receive_credits = --(t->recv_credits); 601 avail_recvmsg_count = ++(t->count_avail_recvmsg); 602 spin_unlock(&t->receive_credit_lock); 603 } 604 605 t->recv_credit_target = 606 le16_to_cpu(data_transfer->credits_requested); 607 atomic_add(le16_to_cpu(data_transfer->credits_granted), 608 &t->send_credits); 609 610 if (le16_to_cpu(data_transfer->flags) & 611 SMB_DIRECT_RESPONSE_REQUESTED) 612 queue_work(smb_direct_wq, &t->send_immediate_work); 613 614 if (atomic_read(&t->send_credits) > 0) 615 wake_up_interruptible(&t->wait_send_credits); 616 617 if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count)) 618 queue_work(smb_direct_wq, &t->post_recv_credits_work); 619 620 if (data_length) { 621 enqueue_reassembly(t, recvmsg, (int)data_length); 622 wake_up_interruptible(&t->wait_reassembly_queue); 623 } else 624 put_recvmsg(t, recvmsg); 625 626 return; 627 } 628 } 629 630 /* 631 * This is an internal error! 632 */ 633 WARN_ON_ONCE(recvmsg->type != SMB_DIRECT_MSG_DATA_TRANSFER); 634 put_recvmsg(t, recvmsg); 635 smb_direct_disconnect_rdma_connection(t); 636 } 637 638 static int smb_direct_post_recv(struct smb_direct_transport *t, 639 struct smb_direct_recvmsg *recvmsg) 640 { 641 struct ib_recv_wr wr; 642 int ret; 643 644 recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device, 645 recvmsg->packet, t->max_recv_size, 646 DMA_FROM_DEVICE); 647 ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr); 648 if (ret) 649 return ret; 650 recvmsg->sge.length = t->max_recv_size; 651 recvmsg->sge.lkey = t->pd->local_dma_lkey; 652 recvmsg->cqe.done = recv_done; 653 654 wr.wr_cqe = &recvmsg->cqe; 655 wr.next = NULL; 656 wr.sg_list = &recvmsg->sge; 657 wr.num_sge = 1; 658 659 ret = ib_post_recv(t->qp, &wr, NULL); 660 if (ret) { 661 pr_err("Can't post recv: %d\n", ret); 662 ib_dma_unmap_single(t->cm_id->device, 663 recvmsg->sge.addr, recvmsg->sge.length, 664 DMA_FROM_DEVICE); 665 recvmsg->sge.length = 0; 666 smb_direct_disconnect_rdma_connection(t); 667 return ret; 668 } 669 return ret; 670 } 671 672 static int smb_direct_read(struct ksmbd_transport *t, char *buf, 673 unsigned int size, int unused) 674 { 675 struct smb_direct_recvmsg *recvmsg; 676 struct smb_direct_data_transfer *data_transfer; 677 int to_copy, to_read, data_read, offset; 678 u32 data_length, remaining_data_length, data_offset; 679 int rc; 680 struct smb_direct_transport *st = smb_trans_direct_transfort(t); 681 682 again: 683 if (st->status != SMB_DIRECT_CS_CONNECTED) { 684 pr_err("disconnected\n"); 685 return -ENOTCONN; 686 } 687 688 /* 689 * No need to hold the reassembly queue lock all the time as we are 690 * the only one reading from the front of the queue. The transport 691 * may add more entries to the back of the queue at the same time 692 */ 693 if (st->reassembly_data_length >= size) { 694 int queue_length; 695 int queue_removed = 0; 696 697 /* 698 * Need to make sure reassembly_data_length is read before 699 * reading reassembly_queue_length and calling 700 * get_first_reassembly. 
This call is lock free
		 * as we never read at the end of the queue, which is being
		 * updated in softirq context as more data is received.
		 */
		virt_rmb();
		queue_length = st->reassembly_queue_length;
		data_read = 0;
		to_read = size;
		offset = st->first_entry_offset;
		while (data_read < size) {
			recvmsg = get_first_reassembly(st);
			data_transfer = smb_direct_recvmsg_payload(recvmsg);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects an RFC1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimizes the
			 * changes to the upper-layer packet processing logic
			 * and will eventually be removed when an intermediate
			 * transport layer is added.
			 */
			if (recvmsg->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				recvmsg->first_segment = false;
				ksmbd_debug(RDMA,
					    "returning rfc1002 length %d\n",
					    rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(buf + data_read, (char *)data_transfer + data_offset + offset,
			       to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length) {
					list_del(&recvmsg->list);
				} else {
					spin_lock_irq(&st->reassembly_queue_lock);
					list_del(&recvmsg->list);
					spin_unlock_irq(&st->reassembly_queue_lock);
				}
				queue_removed++;
				put_recvmsg(st, recvmsg);
				offset = 0;
			} else {
				offset += to_copy;
			}

			to_read -= to_copy;
			data_read += to_copy;
		}

		spin_lock_irq(&st->reassembly_queue_lock);
		st->reassembly_data_length -= data_read;
		st->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&st->reassembly_queue_lock);

		spin_lock(&st->receive_credit_lock);
		st->count_avail_recvmsg += queue_removed;
		if (is_receive_credit_post_required(st->recv_credits, st->count_avail_recvmsg)) {
			spin_unlock(&st->receive_credit_lock);
			queue_work(smb_direct_wq, &st->post_recv_credits_work);
		} else {
			spin_unlock(&st->receive_credit_lock);
		}

		st->first_entry_offset = offset;
		ksmbd_debug(RDMA,
			    "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
			    data_read, st->reassembly_data_length,
			    st->first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	ksmbd_debug(RDMA, "wait_event on more data\n");
	rc = wait_event_interruptible(st->wait_reassembly_queue,
				      st->reassembly_data_length >= size ||
				      st->status != SMB_DIRECT_CS_CONNECTED);
	if (rc)
		return -EINTR;

	goto again;
}

static void smb_direct_post_recv_credits(struct work_struct *work)
{
	struct smb_direct_transport *t = container_of(work,
		struct smb_direct_transport, post_recv_credits_work);
	struct smb_direct_recvmsg *recvmsg;
	int receive_credits, credits = 0;
	int ret;

	spin_lock(&t->receive_credit_lock);
	receive_credits = t->recv_credits;
	spin_unlock(&t->receive_credit_lock);

	if (receive_credits < t->recv_credit_target) {
		while (true) {
			recvmsg =
get_free_recvmsg(t); 814 if (!recvmsg) 815 break; 816 817 recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER; 818 recvmsg->first_segment = false; 819 820 ret = smb_direct_post_recv(t, recvmsg); 821 if (ret) { 822 pr_err("Can't post recv: %d\n", ret); 823 put_recvmsg(t, recvmsg); 824 break; 825 } 826 credits++; 827 } 828 } 829 830 spin_lock(&t->receive_credit_lock); 831 t->recv_credits += credits; 832 t->count_avail_recvmsg -= credits; 833 spin_unlock(&t->receive_credit_lock); 834 835 spin_lock(&t->lock_new_recv_credits); 836 t->new_recv_credits += credits; 837 spin_unlock(&t->lock_new_recv_credits); 838 839 if (credits) 840 queue_work(smb_direct_wq, &t->send_immediate_work); 841 } 842 843 static void send_done(struct ib_cq *cq, struct ib_wc *wc) 844 { 845 struct smb_direct_sendmsg *sendmsg, *sibling; 846 struct smb_direct_transport *t; 847 struct list_head *pos, *prev, *end; 848 849 sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe); 850 t = sendmsg->transport; 851 852 ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n", 853 ib_wc_status_msg(wc->status), wc->status, 854 wc->opcode); 855 856 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { 857 pr_err("Send error. status='%s (%d)', opcode=%d\n", 858 ib_wc_status_msg(wc->status), wc->status, 859 wc->opcode); 860 smb_direct_disconnect_rdma_connection(t); 861 } 862 863 if (atomic_dec_and_test(&t->send_pending)) 864 wake_up(&t->wait_send_pending); 865 866 /* iterate and free the list of messages in reverse. the list's head 867 * is invalid. 868 */ 869 for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next; 870 prev != end; pos = prev, prev = prev->prev) { 871 sibling = container_of(pos, struct smb_direct_sendmsg, list); 872 smb_direct_free_sendmsg(t, sibling); 873 } 874 875 sibling = container_of(pos, struct smb_direct_sendmsg, list); 876 smb_direct_free_sendmsg(t, sibling); 877 } 878 879 static int manage_credits_prior_sending(struct smb_direct_transport *t) 880 { 881 int new_credits; 882 883 spin_lock(&t->lock_new_recv_credits); 884 new_credits = t->new_recv_credits; 885 t->new_recv_credits = 0; 886 spin_unlock(&t->lock_new_recv_credits); 887 888 return new_credits; 889 } 890 891 static int smb_direct_post_send(struct smb_direct_transport *t, 892 struct ib_send_wr *wr) 893 { 894 int ret; 895 896 atomic_inc(&t->send_pending); 897 ret = ib_post_send(t->qp, wr, NULL); 898 if (ret) { 899 pr_err("failed to post send: %d\n", ret); 900 if (atomic_dec_and_test(&t->send_pending)) 901 wake_up(&t->wait_send_pending); 902 smb_direct_disconnect_rdma_connection(t); 903 } 904 return ret; 905 } 906 907 static void smb_direct_send_ctx_init(struct smb_direct_transport *t, 908 struct smb_direct_send_ctx *send_ctx, 909 bool need_invalidate_rkey, 910 unsigned int remote_key) 911 { 912 INIT_LIST_HEAD(&send_ctx->msg_list); 913 send_ctx->wr_cnt = 0; 914 send_ctx->need_invalidate_rkey = need_invalidate_rkey; 915 send_ctx->remote_key = remote_key; 916 } 917 918 static int smb_direct_flush_send_list(struct smb_direct_transport *t, 919 struct smb_direct_send_ctx *send_ctx, 920 bool is_last) 921 { 922 struct smb_direct_sendmsg *first, *last; 923 int ret; 924 925 if (list_empty(&send_ctx->msg_list)) 926 return 0; 927 928 first = list_first_entry(&send_ctx->msg_list, 929 struct smb_direct_sendmsg, 930 list); 931 last = list_last_entry(&send_ctx->msg_list, 932 struct smb_direct_sendmsg, 933 list); 934 935 last->wr.send_flags = IB_SEND_SIGNALED; 936 last->wr.wr_cqe = &last->cqe; 937 if (is_last && 
send_ctx->need_invalidate_rkey) { 938 last->wr.opcode = IB_WR_SEND_WITH_INV; 939 last->wr.ex.invalidate_rkey = send_ctx->remote_key; 940 } 941 942 ret = smb_direct_post_send(t, &first->wr); 943 if (!ret) { 944 smb_direct_send_ctx_init(t, send_ctx, 945 send_ctx->need_invalidate_rkey, 946 send_ctx->remote_key); 947 } else { 948 atomic_add(send_ctx->wr_cnt, &t->send_credits); 949 wake_up(&t->wait_send_credits); 950 list_for_each_entry_safe(first, last, &send_ctx->msg_list, 951 list) { 952 smb_direct_free_sendmsg(t, first); 953 } 954 } 955 return ret; 956 } 957 958 static int wait_for_credits(struct smb_direct_transport *t, 959 wait_queue_head_t *waitq, atomic_t *total_credits, 960 int needed) 961 { 962 int ret; 963 964 do { 965 if (atomic_sub_return(needed, total_credits) >= 0) 966 return 0; 967 968 atomic_add(needed, total_credits); 969 ret = wait_event_interruptible(*waitq, 970 atomic_read(total_credits) >= needed || 971 t->status != SMB_DIRECT_CS_CONNECTED); 972 973 if (t->status != SMB_DIRECT_CS_CONNECTED) 974 return -ENOTCONN; 975 else if (ret < 0) 976 return ret; 977 } while (true); 978 } 979 980 static int wait_for_send_credits(struct smb_direct_transport *t, 981 struct smb_direct_send_ctx *send_ctx) 982 { 983 int ret; 984 985 if (send_ctx && 986 (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) { 987 ret = smb_direct_flush_send_list(t, send_ctx, false); 988 if (ret) 989 return ret; 990 } 991 992 return wait_for_credits(t, &t->wait_send_credits, &t->send_credits, 1); 993 } 994 995 static int wait_for_rw_credits(struct smb_direct_transport *t, int credits) 996 { 997 return wait_for_credits(t, &t->wait_rw_credits, &t->rw_credits, credits); 998 } 999 1000 static int calc_rw_credits(struct smb_direct_transport *t, 1001 char *buf, unsigned int len) 1002 { 1003 return DIV_ROUND_UP(get_buf_page_count(buf, len), 1004 t->pages_per_rw_credit); 1005 } 1006 1007 static int smb_direct_create_header(struct smb_direct_transport *t, 1008 int size, int remaining_data_length, 1009 struct smb_direct_sendmsg **sendmsg_out) 1010 { 1011 struct smb_direct_sendmsg *sendmsg; 1012 struct smb_direct_data_transfer *packet; 1013 int header_length; 1014 int ret; 1015 1016 sendmsg = smb_direct_alloc_sendmsg(t); 1017 if (IS_ERR(sendmsg)) 1018 return PTR_ERR(sendmsg); 1019 1020 /* Fill in the packet header */ 1021 packet = (struct smb_direct_data_transfer *)sendmsg->packet; 1022 packet->credits_requested = cpu_to_le16(t->send_credit_target); 1023 packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); 1024 1025 packet->flags = 0; 1026 packet->reserved = 0; 1027 if (!size) 1028 packet->data_offset = 0; 1029 else 1030 packet->data_offset = cpu_to_le32(24); 1031 packet->data_length = cpu_to_le32(size); 1032 packet->remaining_data_length = cpu_to_le32(remaining_data_length); 1033 packet->padding = 0; 1034 1035 ksmbd_debug(RDMA, 1036 "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n", 1037 le16_to_cpu(packet->credits_requested), 1038 le16_to_cpu(packet->credits_granted), 1039 le32_to_cpu(packet->data_offset), 1040 le32_to_cpu(packet->data_length), 1041 le32_to_cpu(packet->remaining_data_length)); 1042 1043 /* Map the packet to DMA */ 1044 header_length = sizeof(struct smb_direct_data_transfer); 1045 /* If this is a packet without payload, don't send padding */ 1046 if (!size) 1047 header_length = 1048 offsetof(struct smb_direct_data_transfer, padding); 1049 1050 sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, 1051 (void *)packet, 1052 
header_length, 1053 DMA_TO_DEVICE); 1054 ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); 1055 if (ret) { 1056 smb_direct_free_sendmsg(t, sendmsg); 1057 return ret; 1058 } 1059 1060 sendmsg->num_sge = 1; 1061 sendmsg->sge[0].length = header_length; 1062 sendmsg->sge[0].lkey = t->pd->local_dma_lkey; 1063 1064 *sendmsg_out = sendmsg; 1065 return 0; 1066 } 1067 1068 static int get_sg_list(void *buf, int size, struct scatterlist *sg_list, int nentries) 1069 { 1070 bool high = is_vmalloc_addr(buf); 1071 struct page *page; 1072 int offset, len; 1073 int i = 0; 1074 1075 if (size <= 0 || nentries < get_buf_page_count(buf, size)) 1076 return -EINVAL; 1077 1078 offset = offset_in_page(buf); 1079 buf -= offset; 1080 while (size > 0) { 1081 len = min_t(int, PAGE_SIZE - offset, size); 1082 if (high) 1083 page = vmalloc_to_page(buf); 1084 else 1085 page = kmap_to_page(buf); 1086 1087 if (!sg_list) 1088 return -EINVAL; 1089 sg_set_page(sg_list, page, len, offset); 1090 sg_list = sg_next(sg_list); 1091 1092 buf += PAGE_SIZE; 1093 size -= len; 1094 offset = 0; 1095 i++; 1096 } 1097 return i; 1098 } 1099 1100 static int get_mapped_sg_list(struct ib_device *device, void *buf, int size, 1101 struct scatterlist *sg_list, int nentries, 1102 enum dma_data_direction dir) 1103 { 1104 int npages; 1105 1106 npages = get_sg_list(buf, size, sg_list, nentries); 1107 if (npages < 0) 1108 return -EINVAL; 1109 return ib_dma_map_sg(device, sg_list, npages, dir); 1110 } 1111 1112 static int post_sendmsg(struct smb_direct_transport *t, 1113 struct smb_direct_send_ctx *send_ctx, 1114 struct smb_direct_sendmsg *msg) 1115 { 1116 int i; 1117 1118 for (i = 0; i < msg->num_sge; i++) 1119 ib_dma_sync_single_for_device(t->cm_id->device, 1120 msg->sge[i].addr, msg->sge[i].length, 1121 DMA_TO_DEVICE); 1122 1123 msg->cqe.done = send_done; 1124 msg->wr.opcode = IB_WR_SEND; 1125 msg->wr.sg_list = &msg->sge[0]; 1126 msg->wr.num_sge = msg->num_sge; 1127 msg->wr.next = NULL; 1128 1129 if (send_ctx) { 1130 msg->wr.wr_cqe = NULL; 1131 msg->wr.send_flags = 0; 1132 if (!list_empty(&send_ctx->msg_list)) { 1133 struct smb_direct_sendmsg *last; 1134 1135 last = list_last_entry(&send_ctx->msg_list, 1136 struct smb_direct_sendmsg, 1137 list); 1138 last->wr.next = &msg->wr; 1139 } 1140 list_add_tail(&msg->list, &send_ctx->msg_list); 1141 send_ctx->wr_cnt++; 1142 return 0; 1143 } 1144 1145 msg->wr.wr_cqe = &msg->cqe; 1146 msg->wr.send_flags = IB_SEND_SIGNALED; 1147 return smb_direct_post_send(t, &msg->wr); 1148 } 1149 1150 static int smb_direct_post_send_data(struct smb_direct_transport *t, 1151 struct smb_direct_send_ctx *send_ctx, 1152 struct kvec *iov, int niov, 1153 int remaining_data_length) 1154 { 1155 int i, j, ret; 1156 struct smb_direct_sendmsg *msg; 1157 int data_length; 1158 struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1]; 1159 1160 ret = wait_for_send_credits(t, send_ctx); 1161 if (ret) 1162 return ret; 1163 1164 data_length = 0; 1165 for (i = 0; i < niov; i++) 1166 data_length += iov[i].iov_len; 1167 1168 ret = smb_direct_create_header(t, data_length, remaining_data_length, 1169 &msg); 1170 if (ret) { 1171 atomic_inc(&t->send_credits); 1172 return ret; 1173 } 1174 1175 for (i = 0; i < niov; i++) { 1176 struct ib_sge *sge; 1177 int sg_cnt; 1178 1179 sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1); 1180 sg_cnt = get_mapped_sg_list(t->cm_id->device, 1181 iov[i].iov_base, iov[i].iov_len, 1182 sg, SMB_DIRECT_MAX_SEND_SGES - 1, 1183 DMA_TO_DEVICE); 1184 if (sg_cnt <= 0) { 1185 pr_err("failed to map buffer\n"); 1186 ret = 
-ENOMEM;
			goto err;
		} else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
			pr_err("buffer not fitted into sges\n");
			ret = -E2BIG;
			ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt,
					DMA_TO_DEVICE);
			goto err;
		}

		for (j = 0; j < sg_cnt; j++) {
			sge = &msg->sge[msg->num_sge];
			sge->addr = sg_dma_address(&sg[j]);
			sge->length = sg_dma_len(&sg[j]);
			sge->lkey = t->pd->local_dma_lkey;
			msg->num_sge++;
		}
	}

	ret = post_sendmsg(t, send_ctx, msg);
	if (ret)
		goto err;
	return 0;
err:
	smb_direct_free_sendmsg(t, msg);
	atomic_inc(&t->send_credits);
	return ret;
}

static int smb_direct_writev(struct ksmbd_transport *t,
			     struct kvec *iov, int niovs, int buflen,
			     bool need_invalidate, unsigned int remote_key)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
	size_t remaining_data_length;
	size_t iov_idx;
	size_t iov_ofs;
	size_t max_iov_size = st->max_send_size -
			sizeof(struct smb_direct_data_transfer);
	int ret;
	struct smb_direct_send_ctx send_ctx;
	int error = 0;

	if (st->status != SMB_DIRECT_CS_CONNECTED)
		return -ENOTCONN;

	//FIXME: skip RFC1002 header..
	if (WARN_ON_ONCE(niovs <= 1 || iov[0].iov_len != 4))
		return -EINVAL;
	buflen -= 4;
	iov_idx = 1;
	iov_ofs = 0;

	remaining_data_length = buflen;
	ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);

	smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
	while (remaining_data_length) {
		struct kvec vecs[SMB_DIRECT_MAX_SEND_SGES - 1]; /* minus smbdirect hdr */
		size_t possible_bytes = max_iov_size;
		size_t possible_vecs;
		size_t bytes = 0;
		size_t nvecs = 0;

		/*
		 * For the last message, remaining_data_length should
		 * have been 0 already!
		 */
		if (WARN_ON_ONCE(iov_idx >= niovs)) {
			error = -EINVAL;
			goto done;
		}

		/*
		 * We have 2 factors which limit the arguments we pass
		 * to smb_direct_post_send_data():
		 *
		 * 1. The number of supported sges for the send,
		 *    while one is reserved for the smbdirect header.
		 *    And we currently need one SGE per page.
		 * 2. The number of negotiated payload bytes per send.
		 */
		possible_vecs = min_t(size_t, ARRAY_SIZE(vecs), niovs - iov_idx);

		while (iov_idx < niovs && possible_vecs && possible_bytes) {
			struct kvec *v = &vecs[nvecs];
			int page_count;

			v->iov_base = ((u8 *)iov[iov_idx].iov_base) + iov_ofs;
			v->iov_len = min_t(size_t,
					   iov[iov_idx].iov_len - iov_ofs,
					   possible_bytes);
			page_count = get_buf_page_count(v->iov_base, v->iov_len);
			if (page_count > possible_vecs) {
				/*
				 * If the number of pages in the buffer
				 * is too much (because we currently require
				 * one SGE per page), we need to limit the
				 * length.
				 *
				 * We know possible_vecs is at least 1,
				 * so we always keep the first page.
				 *
				 * We need to calculate the number of extra
				 * pages (epages) we can also keep.
				 *
				 * We calculate the number of bytes in the
				 * first page (fplen); this should never be
				 * larger than v->iov_len because page_count is
				 * at least 2, but adding a limitation feels
				 * better.
				 *
				 * Then we calculate the number of bytes (elen)
				 * we can keep for the extra pages.
1300 */ 1301 size_t epages = possible_vecs - 1; 1302 size_t fpofs = offset_in_page(v->iov_base); 1303 size_t fplen = min_t(size_t, PAGE_SIZE - fpofs, v->iov_len); 1304 size_t elen = min_t(size_t, v->iov_len - fplen, epages*PAGE_SIZE); 1305 1306 v->iov_len = fplen + elen; 1307 page_count = get_buf_page_count(v->iov_base, v->iov_len); 1308 if (WARN_ON_ONCE(page_count > possible_vecs)) { 1309 /* 1310 * Something went wrong in the above 1311 * logic... 1312 */ 1313 error = -EINVAL; 1314 goto done; 1315 } 1316 } 1317 possible_vecs -= page_count; 1318 nvecs += 1; 1319 possible_bytes -= v->iov_len; 1320 bytes += v->iov_len; 1321 1322 iov_ofs += v->iov_len; 1323 if (iov_ofs >= iov[iov_idx].iov_len) { 1324 iov_idx += 1; 1325 iov_ofs = 0; 1326 } 1327 } 1328 1329 remaining_data_length -= bytes; 1330 1331 ret = smb_direct_post_send_data(st, &send_ctx, 1332 vecs, nvecs, 1333 remaining_data_length); 1334 if (unlikely(ret)) { 1335 error = ret; 1336 goto done; 1337 } 1338 } 1339 1340 done: 1341 ret = smb_direct_flush_send_list(st, &send_ctx, true); 1342 if (unlikely(!ret && error)) 1343 ret = error; 1344 1345 /* 1346 * As an optimization, we don't wait for individual I/O to finish 1347 * before sending the next one. 1348 * Send them all and wait for pending send count to get to 0 1349 * that means all the I/Os have been out and we are good to return 1350 */ 1351 1352 wait_event(st->wait_send_pending, 1353 atomic_read(&st->send_pending) == 0); 1354 return ret; 1355 } 1356 1357 static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t, 1358 struct smb_direct_rdma_rw_msg *msg, 1359 enum dma_data_direction dir) 1360 { 1361 rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port, 1362 msg->sgt.sgl, msg->sgt.nents, dir); 1363 sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE); 1364 kfree(msg); 1365 } 1366 1367 static void read_write_done(struct ib_cq *cq, struct ib_wc *wc, 1368 enum dma_data_direction dir) 1369 { 1370 struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe, 1371 struct smb_direct_rdma_rw_msg, cqe); 1372 struct smb_direct_transport *t = msg->t; 1373 1374 if (wc->status != IB_WC_SUCCESS) { 1375 msg->status = -EIO; 1376 pr_err("read/write error. 
opcode = %d, status = %s(%d)\n", 1377 wc->opcode, ib_wc_status_msg(wc->status), wc->status); 1378 if (wc->status != IB_WC_WR_FLUSH_ERR) 1379 smb_direct_disconnect_rdma_connection(t); 1380 } 1381 1382 complete(msg->completion); 1383 } 1384 1385 static void read_done(struct ib_cq *cq, struct ib_wc *wc) 1386 { 1387 read_write_done(cq, wc, DMA_FROM_DEVICE); 1388 } 1389 1390 static void write_done(struct ib_cq *cq, struct ib_wc *wc) 1391 { 1392 read_write_done(cq, wc, DMA_TO_DEVICE); 1393 } 1394 1395 static int smb_direct_rdma_xmit(struct smb_direct_transport *t, 1396 void *buf, int buf_len, 1397 struct smb2_buffer_desc_v1 *desc, 1398 unsigned int desc_len, 1399 bool is_read) 1400 { 1401 struct smb_direct_rdma_rw_msg *msg, *next_msg; 1402 int i, ret; 1403 DECLARE_COMPLETION_ONSTACK(completion); 1404 struct ib_send_wr *first_wr; 1405 LIST_HEAD(msg_list); 1406 char *desc_buf; 1407 int credits_needed; 1408 unsigned int desc_buf_len, desc_num = 0; 1409 1410 if (t->status != SMB_DIRECT_CS_CONNECTED) 1411 return -ENOTCONN; 1412 1413 if (buf_len > t->max_rdma_rw_size) 1414 return -EINVAL; 1415 1416 /* calculate needed credits */ 1417 credits_needed = 0; 1418 desc_buf = buf; 1419 for (i = 0; i < desc_len / sizeof(*desc); i++) { 1420 if (!buf_len) 1421 break; 1422 1423 desc_buf_len = le32_to_cpu(desc[i].length); 1424 if (!desc_buf_len) 1425 return -EINVAL; 1426 1427 if (desc_buf_len > buf_len) { 1428 desc_buf_len = buf_len; 1429 desc[i].length = cpu_to_le32(desc_buf_len); 1430 buf_len = 0; 1431 } 1432 1433 credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len); 1434 desc_buf += desc_buf_len; 1435 buf_len -= desc_buf_len; 1436 desc_num++; 1437 } 1438 1439 ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n", 1440 str_read_write(is_read), buf_len, credits_needed); 1441 1442 ret = wait_for_rw_credits(t, credits_needed); 1443 if (ret < 0) 1444 return ret; 1445 1446 /* build rdma_rw_ctx for each descriptor */ 1447 desc_buf = buf; 1448 for (i = 0; i < desc_num; i++) { 1449 msg = kzalloc(struct_size(msg, sg_list, SG_CHUNK_SIZE), 1450 KSMBD_DEFAULT_GFP); 1451 if (!msg) { 1452 ret = -ENOMEM; 1453 goto out; 1454 } 1455 1456 desc_buf_len = le32_to_cpu(desc[i].length); 1457 1458 msg->t = t; 1459 msg->cqe.done = is_read ? read_done : write_done; 1460 msg->completion = &completion; 1461 1462 msg->sgt.sgl = &msg->sg_list[0]; 1463 ret = sg_alloc_table_chained(&msg->sgt, 1464 get_buf_page_count(desc_buf, desc_buf_len), 1465 msg->sg_list, SG_CHUNK_SIZE); 1466 if (ret) { 1467 kfree(msg); 1468 ret = -ENOMEM; 1469 goto out; 1470 } 1471 1472 ret = get_sg_list(desc_buf, desc_buf_len, 1473 msg->sgt.sgl, msg->sgt.orig_nents); 1474 if (ret < 0) { 1475 sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE); 1476 kfree(msg); 1477 goto out; 1478 } 1479 1480 ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port, 1481 msg->sgt.sgl, 1482 get_buf_page_count(desc_buf, desc_buf_len), 1483 0, 1484 le64_to_cpu(desc[i].offset), 1485 le32_to_cpu(desc[i].token), 1486 is_read ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); 1487 if (ret < 0) { 1488 pr_err("failed to init rdma_rw_ctx: %d\n", ret); 1489 sg_free_table_chained(&msg->sgt, SG_CHUNK_SIZE); 1490 kfree(msg); 1491 goto out; 1492 } 1493 1494 list_add_tail(&msg->list, &msg_list); 1495 desc_buf += desc_buf_len; 1496 } 1497 1498 /* concatenate work requests of rdma_rw_ctxs */ 1499 first_wr = NULL; 1500 list_for_each_entry_reverse(msg, &msg_list, list) { 1501 first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port, 1502 &msg->cqe, first_wr); 1503 } 1504 1505 ret = ib_post_send(t->qp, first_wr, NULL); 1506 if (ret) { 1507 pr_err("failed to post send wr for RDMA R/W: %d\n", ret); 1508 goto out; 1509 } 1510 1511 msg = list_last_entry(&msg_list, struct smb_direct_rdma_rw_msg, list); 1512 wait_for_completion(&completion); 1513 ret = msg->status; 1514 out: 1515 list_for_each_entry_safe(msg, next_msg, &msg_list, list) { 1516 list_del(&msg->list); 1517 smb_direct_free_rdma_rw_msg(t, msg, 1518 is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 1519 } 1520 atomic_add(credits_needed, &t->rw_credits); 1521 wake_up(&t->wait_rw_credits); 1522 return ret; 1523 } 1524 1525 static int smb_direct_rdma_write(struct ksmbd_transport *t, 1526 void *buf, unsigned int buflen, 1527 struct smb2_buffer_desc_v1 *desc, 1528 unsigned int desc_len) 1529 { 1530 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, 1531 desc, desc_len, false); 1532 } 1533 1534 static int smb_direct_rdma_read(struct ksmbd_transport *t, 1535 void *buf, unsigned int buflen, 1536 struct smb2_buffer_desc_v1 *desc, 1537 unsigned int desc_len) 1538 { 1539 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, 1540 desc, desc_len, true); 1541 } 1542 1543 static void smb_direct_disconnect(struct ksmbd_transport *t) 1544 { 1545 struct smb_direct_transport *st = smb_trans_direct_transfort(t); 1546 1547 ksmbd_debug(RDMA, "Disconnecting cm_id=%p\n", st->cm_id); 1548 1549 smb_direct_disconnect_rdma_work(&st->disconnect_work); 1550 wait_event_interruptible(st->wait_status, 1551 st->status == SMB_DIRECT_CS_DISCONNECTED); 1552 free_transport(st); 1553 } 1554 1555 static void smb_direct_shutdown(struct ksmbd_transport *t) 1556 { 1557 struct smb_direct_transport *st = smb_trans_direct_transfort(t); 1558 1559 ksmbd_debug(RDMA, "smb-direct shutdown cm_id=%p\n", st->cm_id); 1560 1561 smb_direct_disconnect_rdma_work(&st->disconnect_work); 1562 } 1563 1564 static int smb_direct_cm_handler(struct rdma_cm_id *cm_id, 1565 struct rdma_cm_event *event) 1566 { 1567 struct smb_direct_transport *t = cm_id->context; 1568 1569 ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n", 1570 cm_id, rdma_event_msg(event->event), event->event); 1571 1572 switch (event->event) { 1573 case RDMA_CM_EVENT_ESTABLISHED: { 1574 t->status = SMB_DIRECT_CS_CONNECTED; 1575 wake_up_interruptible(&t->wait_status); 1576 break; 1577 } 1578 case RDMA_CM_EVENT_DEVICE_REMOVAL: 1579 case RDMA_CM_EVENT_DISCONNECTED: { 1580 ib_drain_qp(t->qp); 1581 1582 t->status = SMB_DIRECT_CS_DISCONNECTED; 1583 wake_up_interruptible(&t->wait_status); 1584 wake_up_interruptible(&t->wait_reassembly_queue); 1585 wake_up(&t->wait_send_credits); 1586 break; 1587 } 1588 case RDMA_CM_EVENT_CONNECT_ERROR: { 1589 t->status = SMB_DIRECT_CS_DISCONNECTED; 1590 wake_up_interruptible(&t->wait_status); 1591 break; 1592 } 1593 default: 1594 pr_err("Unexpected RDMA CM event. 
cm_id=%p, event=%s (%d)\n", 1595 cm_id, rdma_event_msg(event->event), 1596 event->event); 1597 break; 1598 } 1599 return 0; 1600 } 1601 1602 static void smb_direct_qpair_handler(struct ib_event *event, void *context) 1603 { 1604 struct smb_direct_transport *t = context; 1605 1606 ksmbd_debug(RDMA, "Received QP event. cm_id=%p, event=%s (%d)\n", 1607 t->cm_id, ib_event_msg(event->event), event->event); 1608 1609 switch (event->event) { 1610 case IB_EVENT_CQ_ERR: 1611 case IB_EVENT_QP_FATAL: 1612 smb_direct_disconnect_rdma_connection(t); 1613 break; 1614 default: 1615 break; 1616 } 1617 } 1618 1619 static int smb_direct_send_negotiate_response(struct smb_direct_transport *t, 1620 int failed) 1621 { 1622 struct smb_direct_sendmsg *sendmsg; 1623 struct smb_direct_negotiate_resp *resp; 1624 int ret; 1625 1626 sendmsg = smb_direct_alloc_sendmsg(t); 1627 if (IS_ERR(sendmsg)) 1628 return -ENOMEM; 1629 1630 resp = (struct smb_direct_negotiate_resp *)sendmsg->packet; 1631 if (failed) { 1632 memset(resp, 0, sizeof(*resp)); 1633 resp->min_version = cpu_to_le16(0x0100); 1634 resp->max_version = cpu_to_le16(0x0100); 1635 resp->status = STATUS_NOT_SUPPORTED; 1636 } else { 1637 resp->status = STATUS_SUCCESS; 1638 resp->min_version = SMB_DIRECT_VERSION_LE; 1639 resp->max_version = SMB_DIRECT_VERSION_LE; 1640 resp->negotiated_version = SMB_DIRECT_VERSION_LE; 1641 resp->reserved = 0; 1642 resp->credits_requested = 1643 cpu_to_le16(t->send_credit_target); 1644 resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); 1645 resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size); 1646 resp->preferred_send_size = cpu_to_le32(t->max_send_size); 1647 resp->max_receive_size = cpu_to_le32(t->max_recv_size); 1648 resp->max_fragmented_size = 1649 cpu_to_le32(t->max_fragmented_recv_size); 1650 } 1651 1652 sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, 1653 (void *)resp, sizeof(*resp), 1654 DMA_TO_DEVICE); 1655 ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); 1656 if (ret) { 1657 smb_direct_free_sendmsg(t, sendmsg); 1658 return ret; 1659 } 1660 1661 sendmsg->num_sge = 1; 1662 sendmsg->sge[0].length = sizeof(*resp); 1663 sendmsg->sge[0].lkey = t->pd->local_dma_lkey; 1664 1665 ret = post_sendmsg(t, NULL, sendmsg); 1666 if (ret) { 1667 smb_direct_free_sendmsg(t, sendmsg); 1668 return ret; 1669 } 1670 1671 wait_event(t->wait_send_pending, 1672 atomic_read(&t->send_pending) == 0); 1673 return 0; 1674 } 1675 1676 static int smb_direct_accept_client(struct smb_direct_transport *t) 1677 { 1678 struct rdma_conn_param conn_param; 1679 struct ib_port_immutable port_immutable; 1680 u32 ird_ord_hdr[2]; 1681 int ret; 1682 1683 memset(&conn_param, 0, sizeof(conn_param)); 1684 conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom, 1685 SMB_DIRECT_CM_INITIATOR_DEPTH); 1686 conn_param.responder_resources = 0; 1687 1688 t->cm_id->device->ops.get_port_immutable(t->cm_id->device, 1689 t->cm_id->port_num, 1690 &port_immutable); 1691 if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) { 1692 ird_ord_hdr[0] = conn_param.responder_resources; 1693 ird_ord_hdr[1] = 1; 1694 conn_param.private_data = ird_ord_hdr; 1695 conn_param.private_data_len = sizeof(ird_ord_hdr); 1696 } else { 1697 conn_param.private_data = NULL; 1698 conn_param.private_data_len = 0; 1699 } 1700 conn_param.retry_count = SMB_DIRECT_CM_RETRY; 1701 conn_param.rnr_retry_count = SMB_DIRECT_CM_RNR_RETRY; 1702 conn_param.flow_control = 0; 1703 1704 ret = rdma_accept(t->cm_id, &conn_param); 1705 if (ret) { 
1706 pr_err("error at rdma_accept: %d\n", ret); 1707 return ret; 1708 } 1709 return 0; 1710 } 1711 1712 static int smb_direct_prepare_negotiation(struct smb_direct_transport *t) 1713 { 1714 int ret; 1715 struct smb_direct_recvmsg *recvmsg; 1716 1717 recvmsg = get_free_recvmsg(t); 1718 if (!recvmsg) 1719 return -ENOMEM; 1720 recvmsg->type = SMB_DIRECT_MSG_NEGOTIATE_REQ; 1721 1722 ret = smb_direct_post_recv(t, recvmsg); 1723 if (ret) { 1724 pr_err("Can't post recv: %d\n", ret); 1725 goto out_err; 1726 } 1727 1728 t->negotiation_requested = false; 1729 ret = smb_direct_accept_client(t); 1730 if (ret) { 1731 pr_err("Can't accept client\n"); 1732 goto out_err; 1733 } 1734 1735 smb_direct_post_recv_credits(&t->post_recv_credits_work); 1736 return 0; 1737 out_err: 1738 put_recvmsg(t, recvmsg); 1739 return ret; 1740 } 1741 1742 static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t) 1743 { 1744 return min_t(unsigned int, 1745 t->cm_id->device->attrs.max_fast_reg_page_list_len, 1746 256); 1747 } 1748 1749 static int smb_direct_init_params(struct smb_direct_transport *t, 1750 struct ib_qp_cap *cap) 1751 { 1752 struct ib_device *device = t->cm_id->device; 1753 int max_send_sges, max_rw_wrs, max_send_wrs; 1754 unsigned int max_sge_per_wr, wrs_per_credit; 1755 1756 /* need 3 more sge. because a SMB_DIRECT header, SMB2 header, 1757 * SMB2 response could be mapped. 1758 */ 1759 t->max_send_size = smb_direct_max_send_size; 1760 max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3; 1761 if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) { 1762 pr_err("max_send_size %d is too large\n", t->max_send_size); 1763 return -EINVAL; 1764 } 1765 1766 /* Calculate the number of work requests for RDMA R/W. 1767 * The maximum number of pages which can be registered 1768 * with one Memory region can be transferred with one 1769 * R/W credit. And at least 4 work requests for each credit 1770 * are needed for MR registration, RDMA R/W, local & remote 1771 * MR invalidation. 
1772 */ 1773 t->max_rdma_rw_size = smb_direct_max_read_write_size; 1774 t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t); 1775 t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size, 1776 (t->pages_per_rw_credit - 1) * 1777 PAGE_SIZE); 1778 1779 max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge, 1780 device->attrs.max_sge_rd); 1781 max_sge_per_wr = max_t(unsigned int, max_sge_per_wr, 1782 max_send_sges); 1783 wrs_per_credit = max_t(unsigned int, 4, 1784 DIV_ROUND_UP(t->pages_per_rw_credit, 1785 max_sge_per_wr) + 1); 1786 max_rw_wrs = t->max_rw_credits * wrs_per_credit; 1787 1788 max_send_wrs = smb_direct_send_credit_target + max_rw_wrs; 1789 if (max_send_wrs > device->attrs.max_cqe || 1790 max_send_wrs > device->attrs.max_qp_wr) { 1791 pr_err("consider lowering send_credit_target = %d\n", 1792 smb_direct_send_credit_target); 1793 pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n", 1794 device->attrs.max_cqe, device->attrs.max_qp_wr); 1795 return -EINVAL; 1796 } 1797 1798 if (smb_direct_receive_credit_max > device->attrs.max_cqe || 1799 smb_direct_receive_credit_max > device->attrs.max_qp_wr) { 1800 pr_err("consider lowering receive_credit_max = %d\n", 1801 smb_direct_receive_credit_max); 1802 pr_err("Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n", 1803 device->attrs.max_cqe, device->attrs.max_qp_wr); 1804 return -EINVAL; 1805 } 1806 1807 if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) { 1808 pr_err("warning: device max_send_sge = %d too small\n", 1809 device->attrs.max_send_sge); 1810 return -EINVAL; 1811 } 1812 if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) { 1813 pr_err("warning: device max_recv_sge = %d too small\n", 1814 device->attrs.max_recv_sge); 1815 return -EINVAL; 1816 } 1817 1818 t->recv_credits = 0; 1819 t->count_avail_recvmsg = 0; 1820 1821 t->recv_credit_max = smb_direct_receive_credit_max; 1822 t->recv_credit_target = 10; 1823 t->new_recv_credits = 0; 1824 1825 t->send_credit_target = smb_direct_send_credit_target; 1826 atomic_set(&t->send_credits, 0); 1827 atomic_set(&t->rw_credits, t->max_rw_credits); 1828 1829 t->max_send_size = smb_direct_max_send_size; 1830 t->max_recv_size = smb_direct_max_receive_size; 1831 t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size; 1832 1833 cap->max_send_wr = max_send_wrs; 1834 cap->max_recv_wr = t->recv_credit_max; 1835 cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES; 1836 cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES; 1837 cap->max_inline_data = 0; 1838 cap->max_rdma_ctxs = t->max_rw_credits; 1839 return 0; 1840 } 1841 1842 static void smb_direct_destroy_pools(struct smb_direct_transport *t) 1843 { 1844 struct smb_direct_recvmsg *recvmsg; 1845 1846 while ((recvmsg = get_free_recvmsg(t))) 1847 mempool_free(recvmsg, t->recvmsg_mempool); 1848 1849 mempool_destroy(t->recvmsg_mempool); 1850 t->recvmsg_mempool = NULL; 1851 1852 kmem_cache_destroy(t->recvmsg_cache); 1853 t->recvmsg_cache = NULL; 1854 1855 mempool_destroy(t->sendmsg_mempool); 1856 t->sendmsg_mempool = NULL; 1857 1858 kmem_cache_destroy(t->sendmsg_cache); 1859 t->sendmsg_cache = NULL; 1860 } 1861 1862 static int smb_direct_create_pools(struct smb_direct_transport *t) 1863 { 1864 char name[80]; 1865 int i; 1866 struct smb_direct_recvmsg *recvmsg; 1867 1868 snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t); 1869 t->sendmsg_cache = kmem_cache_create(name, 1870 sizeof(struct smb_direct_sendmsg) + 1871 sizeof(struct smb_direct_negotiate_resp), 1872 0, SLAB_HWCACHE_ALIGN, NULL); 1873 
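	/*
	 * The in-place packet area of a sendmsg only carries control
	 * headers; the negotiate response is the largest of them, so the
	 * cache is sized for it. Bulk payload is attached through extra
	 * SGEs rather than copied into the packet area.
	 */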
if (!t->sendmsg_cache) 1874 return -ENOMEM; 1875 1876 t->sendmsg_mempool = mempool_create(t->send_credit_target, 1877 mempool_alloc_slab, mempool_free_slab, 1878 t->sendmsg_cache); 1879 if (!t->sendmsg_mempool) 1880 goto err; 1881 1882 snprintf(name, sizeof(name), "smb_direct_resp_%p", t); 1883 t->recvmsg_cache = kmem_cache_create(name, 1884 sizeof(struct smb_direct_recvmsg) + 1885 t->max_recv_size, 1886 0, SLAB_HWCACHE_ALIGN, NULL); 1887 if (!t->recvmsg_cache) 1888 goto err; 1889 1890 t->recvmsg_mempool = 1891 mempool_create(t->recv_credit_max, mempool_alloc_slab, 1892 mempool_free_slab, t->recvmsg_cache); 1893 if (!t->recvmsg_mempool) 1894 goto err; 1895 1896 INIT_LIST_HEAD(&t->recvmsg_queue); 1897 1898 for (i = 0; i < t->recv_credit_max; i++) { 1899 recvmsg = mempool_alloc(t->recvmsg_mempool, KSMBD_DEFAULT_GFP); 1900 if (!recvmsg) 1901 goto err; 1902 recvmsg->transport = t; 1903 recvmsg->sge.length = 0; 1904 list_add(&recvmsg->list, &t->recvmsg_queue); 1905 } 1906 t->count_avail_recvmsg = t->recv_credit_max; 1907 1908 return 0; 1909 err: 1910 smb_direct_destroy_pools(t); 1911 return -ENOMEM; 1912 } 1913 1914 static int smb_direct_create_qpair(struct smb_direct_transport *t, 1915 struct ib_qp_cap *cap) 1916 { 1917 int ret; 1918 struct ib_qp_init_attr qp_attr; 1919 int pages_per_rw; 1920 1921 t->pd = ib_alloc_pd(t->cm_id->device, 0); 1922 if (IS_ERR(t->pd)) { 1923 pr_err("Can't create RDMA PD\n"); 1924 ret = PTR_ERR(t->pd); 1925 t->pd = NULL; 1926 return ret; 1927 } 1928 1929 t->send_cq = ib_alloc_cq(t->cm_id->device, t, 1930 smb_direct_send_credit_target + cap->max_rdma_ctxs, 1931 0, IB_POLL_WORKQUEUE); 1932 if (IS_ERR(t->send_cq)) { 1933 pr_err("Can't create RDMA send CQ\n"); 1934 ret = PTR_ERR(t->send_cq); 1935 t->send_cq = NULL; 1936 goto err; 1937 } 1938 1939 t->recv_cq = ib_alloc_cq(t->cm_id->device, t, 1940 t->recv_credit_max, 0, IB_POLL_WORKQUEUE); 1941 if (IS_ERR(t->recv_cq)) { 1942 pr_err("Can't create RDMA recv CQ\n"); 1943 ret = PTR_ERR(t->recv_cq); 1944 t->recv_cq = NULL; 1945 goto err; 1946 } 1947 1948 memset(&qp_attr, 0, sizeof(qp_attr)); 1949 qp_attr.event_handler = smb_direct_qpair_handler; 1950 qp_attr.qp_context = t; 1951 qp_attr.cap = *cap; 1952 qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 1953 qp_attr.qp_type = IB_QPT_RC; 1954 qp_attr.send_cq = t->send_cq; 1955 qp_attr.recv_cq = t->recv_cq; 1956 qp_attr.port_num = ~0; 1957 1958 ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr); 1959 if (ret) { 1960 pr_err("Can't create RDMA QP: %d\n", ret); 1961 goto err; 1962 } 1963 1964 t->qp = t->cm_id->qp; 1965 t->cm_id->event_handler = smb_direct_cm_handler; 1966 1967 pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1; 1968 if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) { 1969 ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, 1970 t->max_rw_credits, IB_MR_TYPE_MEM_REG, 1971 t->pages_per_rw_credit, 0); 1972 if (ret) { 1973 pr_err("failed to init mr pool count %d pages %d\n", 1974 t->max_rw_credits, t->pages_per_rw_credit); 1975 goto err; 1976 } 1977 } 1978 1979 return 0; 1980 err: 1981 if (t->qp) { 1982 t->qp = NULL; 1983 rdma_destroy_qp(t->cm_id); 1984 } 1985 if (t->recv_cq) { 1986 ib_destroy_cq(t->recv_cq); 1987 t->recv_cq = NULL; 1988 } 1989 if (t->send_cq) { 1990 ib_destroy_cq(t->send_cq); 1991 t->send_cq = NULL; 1992 } 1993 if (t->pd) { 1994 ib_dealloc_pd(t->pd); 1995 t->pd = NULL; 1996 } 1997 return ret; 1998 } 1999 2000 static int smb_direct_prepare(struct ksmbd_transport *t) 2001 { 2002 struct smb_direct_transport *st = smb_trans_direct_transfort(t); 2003 struct 

static int smb_direct_prepare(struct ksmbd_transport *t)
{
	struct smb_direct_transport *st = smb_trans_direct_transfort(t);
	struct smb_direct_recvmsg *recvmsg;
	struct smb_direct_negotiate_req *req;
	int ret;

	ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
	ret = wait_event_interruptible_timeout(st->wait_status,
					       st->negotiation_requested ||
					       st->status == SMB_DIRECT_CS_DISCONNECTED,
					       SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
	if (ret <= 0 || st->status == SMB_DIRECT_CS_DISCONNECTED)
		return ret < 0 ? ret : -ETIMEDOUT;

	recvmsg = get_first_reassembly(st);
	if (!recvmsg)
		return -ECONNABORTED;

	ret = smb_direct_check_recvmsg(recvmsg);
	if (ret == -ECONNABORTED)
		goto out;

	req = (struct smb_direct_negotiate_req *)recvmsg->packet;
	st->max_recv_size = min_t(int, st->max_recv_size,
				  le32_to_cpu(req->preferred_send_size));
	st->max_send_size = min_t(int, st->max_send_size,
				  le32_to_cpu(req->max_receive_size));
	st->max_fragmented_send_size =
		le32_to_cpu(req->max_fragmented_size);
	st->max_fragmented_recv_size =
		(st->recv_credit_max * st->max_recv_size) / 2;

	ret = smb_direct_send_negotiate_response(st, ret);
out:
	spin_lock_irq(&st->reassembly_queue_lock);
	st->reassembly_queue_length--;
	list_del(&recvmsg->list);
	spin_unlock_irq(&st->reassembly_queue_lock);
	put_recvmsg(st, recvmsg);

	return ret;
}

static int smb_direct_connect(struct smb_direct_transport *st)
{
	int ret;
	struct ib_qp_cap qp_cap;

	ret = smb_direct_init_params(st, &qp_cap);
	if (ret) {
		pr_err("Can't configure RDMA parameters\n");
		return ret;
	}

	ret = smb_direct_create_pools(st);
	if (ret) {
		pr_err("Can't init RDMA pool: %d\n", ret);
		return ret;
	}

	ret = smb_direct_create_qpair(st, &qp_cap);
	if (ret) {
		pr_err("Can't accept RDMA client: %d\n", ret);
		return ret;
	}

	ret = smb_direct_prepare_negotiation(st);
	if (ret) {
		pr_err("Can't negotiate: %d\n", ret);
		return ret;
	}
	return 0;
}

static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
{
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		return false;
	if (attrs->max_fast_reg_page_list_len == 0)
		return false;
	return true;
}
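
/*
 * Negotiation sketch (illustrative numbers): smb_direct_prepare() above
 * clamps the message sizes in both directions - the local receive size is
 * capped by the peer's preferred_send_size and the local send size by the
 * peer's max_receive_size.  For example, a peer preferring 8 KiB sends
 * against a 1 KiB local receive limit settles on 1 KiB SMB_DIRECT
 * messages; larger upper-layer payloads are then carried as fragments
 * bounded by max_fragmented_recv_size.  rdma_frwr_is_supported() above is
 * used by the connect-request handler below to reject devices that cannot
 * do fast-registration work requests, which the RDMA read/write path
 * depends on.
 */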

static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
{
	struct smb_direct_transport *t;
	struct task_struct *handler;
	int ret;

	if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
		ksmbd_debug(RDMA,
			    "Fast Registration Work Requests are not supported. device capabilities=%llx\n",
			    new_cm_id->device->attrs.device_cap_flags);
		return -EPROTONOSUPPORT;
	}

	t = alloc_transport(new_cm_id);
	if (!t)
		return -ENOMEM;

	ret = smb_direct_connect(t);
	if (ret)
		goto out_err;

	handler = kthread_run(ksmbd_conn_handler_loop,
			      KSMBD_TRANS(t)->conn, "ksmbd:r%u",
			      smb_direct_port);
	if (IS_ERR(handler)) {
		ret = PTR_ERR(handler);
		pr_err("Can't start thread\n");
		goto out_err;
	}

	return 0;
out_err:
	free_transport(t);
	return ret;
}

static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
				     struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST: {
		int ret = smb_direct_handle_connect_request(cm_id);

		if (ret) {
			pr_err("Can't create transport: %d\n", ret);
			return ret;
		}

		ksmbd_debug(RDMA, "Received connection request. cm_id=%p\n",
			    cm_id);
		break;
	}
	default:
		pr_err("Unexpected listen event. cm_id=%p, event=%s (%d)\n",
		       cm_id, rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

static int smb_direct_listen(int port)
{
	int ret;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = htons(port),
	};

	cm_id = rdma_create_id(&init_net, smb_direct_listen_handler,
			       &smb_direct_listener, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id));
		return PTR_ERR(cm_id);
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	if (ret) {
		pr_err("Can't bind: %d\n", ret);
		goto err;
	}

	smb_direct_listener.cm_id = cm_id;

	ret = rdma_listen(cm_id, 10);
	if (ret) {
		pr_err("Can't listen: %d\n", ret);
		goto err;
	}
	return 0;
err:
	smb_direct_listener.cm_id = NULL;
	rdma_destroy_id(cm_id);
	return ret;
}

static int smb_direct_ib_client_add(struct ib_device *ib_dev)
{
	struct smb_direct_device *smb_dev;

	/* Use the iWARP port (5445) if the device is not an InfiniBand CA */
	if (ib_dev->node_type != RDMA_NODE_IB_CA)
		smb_direct_port = SMB_DIRECT_PORT_IWARP;

	if (!rdma_frwr_is_supported(&ib_dev->attrs))
		return 0;

	smb_dev = kzalloc(sizeof(*smb_dev), KSMBD_DEFAULT_GFP);
	if (!smb_dev)
		return -ENOMEM;
	smb_dev->ib_dev = ib_dev;

	write_lock(&smb_direct_device_lock);
	list_add(&smb_dev->list, &smb_direct_device_list);
	write_unlock(&smb_direct_device_lock);

	ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
	return 0;
}

static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
					void *client_data)
{
	struct smb_direct_device *smb_dev, *tmp;

	write_lock(&smb_direct_device_lock);
	list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
		if (smb_dev->ib_dev == ib_dev) {
			list_del(&smb_dev->list);
			kfree(smb_dev);
			break;
		}
	}
	write_unlock(&smb_direct_device_lock);
}

static struct ib_client smb_direct_ib_client = {
	.name = "ksmbd_smb_direct_ib",
	.add = smb_direct_ib_client_add,
	.remove = smb_direct_ib_client_remove,
};
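
/*
 * Registering smb_direct_ib_client (done from ksmbd_rdma_init() below)
 * keeps smb_direct_device_list current: the RDMA core calls .add for every
 * ib_device present at registration time and for devices hot-added later,
 * and .remove when one goes away.  Only FRWR-capable devices are cached,
 * so ksmbd_rdma_capable_netdev() can usually answer from this list; its
 * ib_device_get_by_netdev() lookup is a fallback for devices that were
 * not cached.
 */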

int ksmbd_rdma_init(void)
{
	int ret;

	smb_direct_listener.cm_id = NULL;

	ret = ib_register_client(&smb_direct_ib_client);
	if (ret) {
		pr_err("failed to ib_register_client\n");
		return ret;
	}

	/* When a client is running out of send credits, the credits are
	 * granted by the server's sending a packet using this queue.
	 * This avoids the situation that a client cannot send packets
	 * for lack of credits
	 */
	smb_direct_wq = alloc_workqueue("ksmbd-smb_direct-wq",
					WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!smb_direct_wq)
		return -ENOMEM;

	ret = smb_direct_listen(smb_direct_port);
	if (ret) {
		destroy_workqueue(smb_direct_wq);
		smb_direct_wq = NULL;
		pr_err("Can't listen: %d\n", ret);
		return ret;
	}

	ksmbd_debug(RDMA, "init RDMA listener. cm_id=%p\n",
		    smb_direct_listener.cm_id);
	return 0;
}

void ksmbd_rdma_stop_listening(void)
{
	if (!smb_direct_listener.cm_id)
		return;

	ib_unregister_client(&smb_direct_ib_client);
	rdma_destroy_id(smb_direct_listener.cm_id);

	smb_direct_listener.cm_id = NULL;
}

void ksmbd_rdma_destroy(void)
{
	if (smb_direct_wq) {
		destroy_workqueue(smb_direct_wq);
		smb_direct_wq = NULL;
	}
}

bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
{
	struct smb_direct_device *smb_dev;
	int i;
	bool rdma_capable = false;

	read_lock(&smb_direct_device_lock);
	list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
		for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
			struct net_device *ndev;

			ndev = ib_device_get_netdev(smb_dev->ib_dev, i + 1);
			if (!ndev)
				continue;

			if (ndev == netdev) {
				dev_put(ndev);
				rdma_capable = true;
				goto out;
			}
			dev_put(ndev);
		}
	}
out:
	read_unlock(&smb_direct_device_lock);

	if (!rdma_capable) {
		struct ib_device *ibdev;

		ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
		if (ibdev) {
			rdma_capable = rdma_frwr_is_supported(&ibdev->attrs);
			ib_device_put(ibdev);
		}
	}

	ksmbd_debug(RDMA, "netdev(%s) rdma capable : %s\n",
		    netdev->name, str_true_false(rdma_capable));

	return rdma_capable;
}

static const struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
	.prepare = smb_direct_prepare,
	.disconnect = smb_direct_disconnect,
	.shutdown = smb_direct_shutdown,
	.writev = smb_direct_writev,
	.read = smb_direct_read,
	.rdma_read = smb_direct_rdma_read,
	.rdma_write = smb_direct_rdma_write,
	.free_transport = smb_direct_free_transport,
};
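
/*
 * Rough lifecycle sketch (a summary of the code above; the caller-side
 * ordering is an assumption, not something this file enforces):
 *
 *	ksmbd_rdma_init()			register ib_client, create the
 *						workqueue, rdma_listen()
 *	  -> smb_direct_listen_handler()	RDMA_CM_EVENT_CONNECT_REQUEST
 *	    -> smb_direct_handle_connect_request()
 *	      -> smb_direct_connect()		init params, pools, QP, then
 *						SMB_DIRECT negotiation
 *	      -> kthread_run(ksmbd_conn_handler_loop, ...)
 *	...
 *	ksmbd_rdma_stop_listening()		unregister ib_client, destroy
 *						the listener cm_id
 *	ksmbd_rdma_destroy()			destroy the workqueue
 */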