/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2006-2009 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/sched.h>

#include <asm/atomic.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include "krping.h"
#include "getopt.h"

extern int krping_debug;
#define DEBUG_LOG(cb, x...) do { if (krping_debug) log(LOG_INFO, x); } while (0)
#define PRINTF(cb, x...) log(LOG_INFO, x)
#define BIND_INFO 1

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("RDMA ping client/server");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(krping, 1);
MODULE_DEPEND(krping, linuxkpi, 1, 1, 1);

static __inline uint64_t
get_cycles(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

typedef uint64_t cycles_t;

enum mem_type {
	DMA = 1,
	FASTREG = 2,
	MW = 3,
	MR = 4
};

static const struct krping_option krping_opts[] = {
	{"count", OPT_INT, 'C'},
	{"size", OPT_INT, 'S'},
	{"addr", OPT_STRING, 'a'},
	{"port", OPT_INT, 'p'},
	{"verbose", OPT_NOPARAM, 'v'},
	{"validate", OPT_NOPARAM, 'V'},
	{"server", OPT_NOPARAM, 's'},
	{"client", OPT_NOPARAM, 'c'},
	{"mem_mode", OPT_STRING, 'm'},
	{"server_inv", OPT_NOPARAM, 'I'},
	{"wlat", OPT_NOPARAM, 'l'},
	{"rlat", OPT_NOPARAM, 'L'},
	{"bw", OPT_NOPARAM, 'B'},
	{"duplex", OPT_NOPARAM, 'd'},
	{"txdepth", OPT_INT, 'T'},
	{"poll", OPT_NOPARAM, 'P'},
	{"local_dma_lkey", OPT_NOPARAM, 'Z'},
	{"read_inv", OPT_NOPARAM, 'R'},
	{"fr", OPT_INT, 'f'},
	{NULL, 0, 0}
};

/* 64-bit byte-order helpers (the swap is its own inverse). */
#define htonll(x) cpu_to_be64((x))
#define ntohll(x) be64_to_cpu((x))

static struct mutex krping_mutex;

/*
 * List of running krping threads.
 */
static LIST_HEAD(krping_cbs);
/*
 * krping "ping/pong" loop:
 * 	client sends source rkey/addr/len
 *	server receives source rkey/addr/len
 *	server rdma reads "ping" data from source
 * 	server sends "go ahead" on rdma read completion
 *	client sends sink rkey/addr/len
 * 	server receives sink rkey/addr/len
 * 	server rdma writes "pong" data to sink
 * 	server sends "go ahead" on rdma write completion
 * 	<repeat loop>
 */
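
/*
 * Example usage (an illustrative sketch, not part of this file): the
 * FreeBSD port is normally driven by writing an option string to the
 * /dev/krping control node created by the companion krping_dev.c, and
 * per-run statistics are read back from the same node:
 *
 *	# on the server
 *	echo "server,addr=192.168.0.1,port=9999" > /dev/krping
 *	# on the client
 *	echo "client,addr=192.168.0.1,port=9999,count=100" > /dev/krping
 *	cat /dev/krping
 *
 * The long option names are those listed in krping_opts[] above.
 */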

/*
 * These states are used to signal events between the completion handler
 * and the main client or server thread.
 *
 * Once CONNECTED, they cycle through RDMA_READ_ADV, RDMA_WRITE_ADV,
 * and RDMA_WRITE_COMPLETE for each ping.
 */
enum test_state {
	IDLE = 1,
	CONNECT_REQUEST,
	ADDR_RESOLVED,
	ROUTE_RESOLVED,
	CONNECTED,
	RDMA_READ_ADV,
	RDMA_READ_COMPLETE,
	RDMA_WRITE_ADV,
	RDMA_WRITE_COMPLETE,
	ERROR
};

struct krping_rdma_info {
	uint64_t buf;
	uint32_t rkey;
	uint32_t size;
};

/*
 * Default max buffer size for IO...
 */
#define RPING_BUFSIZE (128*1024)
#define RPING_SQ_DEPTH 64

/*
 * Control block struct.
 */
struct krping_cb {
	void *cookie;
	int server;			/* 0 iff client */
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_qp *qp;

	enum mem_type mem;
	struct ib_mr *dma_mr;

	struct ib_fast_reg_page_list *page_list;
	int page_list_len;
	struct ib_send_wr fastreg_wr;
	struct ib_send_wr invalidate_wr;
	struct ib_mr *fastreg_mr;
	int server_invalidate;
	int read_inv;
	u8 key;

	struct ib_mw *mw;
	struct ib_mw_bind bind_attr;

	struct ib_recv_wr rq_wr;	/* recv work request record */
	struct ib_sge recv_sgl;		/* recv single SGE */
	struct krping_rdma_info recv_buf;/* malloc'd buffer */
	u64 recv_dma_addr;
	DECLARE_PCI_UNMAP_ADDR(recv_mapping)
	struct ib_mr *recv_mr;

	struct ib_send_wr sq_wr;	/* send work request record */
	struct ib_sge send_sgl;
	struct krping_rdma_info send_buf;/* single send buf */
	u64 send_dma_addr;
	DECLARE_PCI_UNMAP_ADDR(send_mapping)
	struct ib_mr *send_mr;

	struct ib_send_wr rdma_sq_wr;	/* rdma work request record */
	struct ib_sge rdma_sgl;		/* rdma single SGE */
	char *rdma_buf;			/* used as rdma sink */
	u64 rdma_dma_addr;
	DECLARE_PCI_UNMAP_ADDR(rdma_mapping)
	struct ib_mr *rdma_mr;

	uint32_t remote_rkey;		/* remote peer's RKEY */
	uint64_t remote_addr;		/* remote peer's TO */
	uint32_t remote_len;		/* remote peer's LEN */

	char *start_buf;		/* rdma read src */
	u64 start_dma_addr;
	DECLARE_PCI_UNMAP_ADDR(start_mapping)
	struct ib_mr *start_mr;

	enum test_state state;		/* used for cond/signalling */
	wait_queue_head_t sem;
	struct krping_stats stats;

	uint16_t port;			/* dst port in NBO */
	struct in_addr addr;		/* dst addr in NBO */
	char *addr_str;			/* dst addr string */
	int verbose;			/* verbose logging */
	int count;			/* ping count */
	int size;			/* ping data size */
	int validate;			/* validate ping data */
	int wlat;			/* run wlat test */
	int rlat;			/* run rlat test */
	int bw;				/* run bw test */
	int duplex;			/* run bw full duplex test */
	int poll;			/* poll or block for rlat test */
	int txdepth;			/* SQ depth */
	int local_dma_lkey;		/* use 0 for lkey */
	int frtest;			/* fastreg test */
	int testnum;

	/* CM stuff */
	struct rdma_cm_id *cm_id;	/* connection on client side, */
					/* listener on server side. */
	struct rdma_cm_id *child_cm_id;	/* connection on server side */
	struct list_head list;
};

static int krping_cma_event_handler(struct rdma_cm_id *cma_id,
				    struct rdma_cm_event *event)
{
	int ret;
	struct krping_cb *cb = cma_id->context;

	DEBUG_LOG(cb, "cma_event type %d cma_id %p (%s)\n", event->event,
		  cma_id, (cma_id == cb->cm_id) ? "parent" : "child");

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		cb->state = ADDR_RESOLVED;
		ret = rdma_resolve_route(cma_id, 2000);
		if (ret) {
			PRINTF(cb, "rdma_resolve_route error %d\n", ret);
			wake_up_interruptible(&cb->sem);
		}
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		cb->state = ROUTE_RESOLVED;
		cb->child_cm_id = cma_id;
		wake_up_interruptible(&cb->sem);
		break;

	case RDMA_CM_EVENT_CONNECT_REQUEST:
		if (cb->state == IDLE) {
			cb->state = CONNECT_REQUEST;
			cb->child_cm_id = cma_id;
		} else {
			PRINTF(cb, "Received connection request in wrong state"
			       " (%d)\n", cb->state);
		}
		DEBUG_LOG(cb, "child cma %p\n", cb->child_cm_id);
		wake_up_interruptible(&cb->sem);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		DEBUG_LOG(cb, "ESTABLISHED\n");
		if (!cb->server) {
			cb->state = CONNECTED;
		}
		wake_up_interruptible(&cb->sem);
		break;

	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		PRINTF(cb, "cma event %d, error %d\n", event->event,
		       event->status);
		cb->state = ERROR;
		wake_up_interruptible(&cb->sem);
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		PRINTF(cb, "DISCONNECT EVENT...\n");
		cb->state = ERROR;
		wake_up_interruptible(&cb->sem);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		PRINTF(cb, "cma detected device removal!\n");
		break;

	default:
		PRINTF(cb, "unexpected cma event %d!\n", event->event);
		wake_up_interruptible(&cb->sem);
		break;
	}
	return 0;
}

static int server_recv(struct krping_cb *cb, struct ib_wc *wc)
{
	if (wc->byte_len != sizeof(cb->recv_buf)) {
		PRINTF(cb, "Received bogus data, size %d\n",
		       wc->byte_len);
		return -1;
	}

	cb->remote_rkey = ntohl(cb->recv_buf.rkey);
	cb->remote_addr = ntohll(cb->recv_buf.buf);
	cb->remote_len = ntohl(cb->recv_buf.size);
	DEBUG_LOG(cb, "Received rkey %x addr %llx len %d from peer\n",
		  cb->remote_rkey, (unsigned long long)cb->remote_addr,
		  cb->remote_len);

	if (cb->state <= CONNECTED || cb->state == RDMA_WRITE_COMPLETE)
		cb->state = RDMA_READ_ADV;
	else
		cb->state = RDMA_WRITE_ADV;

	return 0;
}

static int client_recv(struct krping_cb *cb, struct ib_wc *wc)
{
	if (wc->byte_len != sizeof(cb->recv_buf)) {
		PRINTF(cb, "Received bogus data, size %d\n",
		       wc->byte_len);
		return -1;
	}

	if (cb->state == RDMA_READ_ADV)
		cb->state = RDMA_WRITE_ADV;
	else
		cb->state = RDMA_WRITE_COMPLETE;

	return 0;
}
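
/*
 * Illustration of how the two recv handlers above drive one ping
 * iteration in the default mode (client states on the left, server
 * states on the right, time flowing downward):
 *
 *	client				server
 *	------				------
 *	send source adv	  -->		server_recv(): RDMA_READ_ADV
 *					rdma read, then "go ahead" send
 *	client_recv(): RDMA_WRITE_ADV
 *	send sink adv	  -->		server_recv(): RDMA_WRITE_ADV
 *					rdma write, then "go ahead" send
 *	client_recv(): RDMA_WRITE_COMPLETE
 */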

static void krping_cq_event_handler(struct ib_cq *cq, void *ctx)
{
	struct krping_cb *cb = ctx;
	struct ib_wc wc;
	struct ib_recv_wr *bad_wr;
	int ret;

	BUG_ON(cb->cq != cq);
	if (cb->state == ERROR) {
		PRINTF(cb, "cq completion in ERROR state\n");
		return;
	}
	if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest)
		ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 1) {
		if (wc.status) {
			if (wc.status == IB_WC_WR_FLUSH_ERR) {
				DEBUG_LOG(cb, "cq flushed\n");
				continue;
			} else {
				PRINTF(cb, "cq completion failed with "
				       "wr_id %jx status %d opcode %d vendor_err %x\n",
				       (uintmax_t)wc.wr_id, wc.status, wc.opcode,
				       wc.vendor_err);
				goto error;
			}
		}

		switch (wc.opcode) {
		case IB_WC_SEND:
			DEBUG_LOG(cb, "send completion\n");
			cb->stats.send_bytes += cb->send_sgl.length;
			cb->stats.send_msgs++;
			break;

		case IB_WC_RDMA_WRITE:
			DEBUG_LOG(cb, "rdma write completion\n");
			cb->stats.write_bytes += cb->rdma_sq_wr.sg_list->length;
			cb->stats.write_msgs++;
			cb->state = RDMA_WRITE_COMPLETE;
			wake_up_interruptible(&cb->sem);
			break;

		case IB_WC_RDMA_READ:
			DEBUG_LOG(cb, "rdma read completion\n");
			cb->stats.read_bytes += cb->rdma_sq_wr.sg_list->length;
			cb->stats.read_msgs++;
			cb->state = RDMA_READ_COMPLETE;
			wake_up_interruptible(&cb->sem);
			break;

		case IB_WC_RECV:
			DEBUG_LOG(cb, "recv completion\n");
			cb->stats.recv_bytes += sizeof(cb->recv_buf);
			cb->stats.recv_msgs++;
			if (cb->wlat || cb->rlat || cb->bw || cb->frtest)
				ret = server_recv(cb, &wc);
			else
				ret = cb->server ? server_recv(cb, &wc) :
						   client_recv(cb, &wc);
			if (ret) {
				PRINTF(cb, "recv wc error: %d\n", ret);
				goto error;
			}

			ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
			if (ret) {
				PRINTF(cb, "post recv error: %d\n",
				       ret);
				goto error;
			}
			wake_up_interruptible(&cb->sem);
			break;

		default:
			PRINTF(cb,
			       "%s:%d Unexpected opcode %d, Shutting down\n",
			       __func__, __LINE__, wc.opcode);
			goto error;
		}
	}
	if (ret) {
		PRINTF(cb, "poll error %d\n", ret);
		goto error;
	}
	return;
error:
	cb->state = ERROR;
	wake_up_interruptible(&cb->sem);
}
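
/*
 * A note on the completion handler above: re-arming the CQ with
 * ib_req_notify_cq() *before* draining it with ib_poll_cq() is the
 * standard verbs idiom for avoiding a missed-event race; a completion
 * that arrives between a drain and a later re-arm would never generate
 * another callback.  The wlat/rlat/bw/frtest paths skip the re-arm
 * because they poll the CQ directly from the test loop.
 */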

static int krping_accept(struct krping_cb *cb)
{
	struct rdma_conn_param conn_param;
	int ret;

	DEBUG_LOG(cb, "accepting client connection request\n");

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 1;
	conn_param.initiator_depth = 1;

	ret = rdma_accept(cb->child_cm_id, &conn_param);
	if (ret) {
		PRINTF(cb, "rdma_accept error: %d\n", ret);
		return ret;
	}

	if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest) {
		wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
		if (cb->state == ERROR) {
			PRINTF(cb, "wait for CONNECTED state %d\n",
			       cb->state);
			return -1;
		}
	}
	return 0;
}

static void krping_setup_wr(struct krping_cb *cb)
{
	cb->recv_sgl.addr = cb->recv_dma_addr;
	cb->recv_sgl.length = sizeof cb->recv_buf;
	if (cb->local_dma_lkey)
		cb->recv_sgl.lkey = cb->qp->device->local_dma_lkey;
	else if (cb->mem == DMA)
		cb->recv_sgl.lkey = cb->dma_mr->lkey;
	else
		cb->recv_sgl.lkey = cb->recv_mr->lkey;
	cb->rq_wr.sg_list = &cb->recv_sgl;
	cb->rq_wr.num_sge = 1;

	cb->send_sgl.addr = cb->send_dma_addr;
	cb->send_sgl.length = sizeof cb->send_buf;
	if (cb->local_dma_lkey)
		cb->send_sgl.lkey = cb->qp->device->local_dma_lkey;
	else if (cb->mem == DMA)
		cb->send_sgl.lkey = cb->dma_mr->lkey;
	else
		cb->send_sgl.lkey = cb->send_mr->lkey;

	cb->sq_wr.opcode = IB_WR_SEND;
	cb->sq_wr.send_flags = IB_SEND_SIGNALED;
	cb->sq_wr.sg_list = &cb->send_sgl;
	cb->sq_wr.num_sge = 1;

	if (cb->server || cb->wlat || cb->rlat || cb->bw || cb->frtest) {
		cb->rdma_sgl.addr = cb->rdma_dma_addr;
		if (cb->mem == MR)
			cb->rdma_sgl.lkey = cb->rdma_mr->lkey;
		cb->rdma_sq_wr.send_flags = IB_SEND_SIGNALED;
		cb->rdma_sq_wr.sg_list = &cb->rdma_sgl;
		cb->rdma_sq_wr.num_sge = 1;
	}

	switch (cb->mem) {
	case FASTREG:

		/*
		 * A chain of 2 WRs, INVALIDATE_MR + FAST_REG_MR.
		 * Both are unsignaled.  The client uses them to re-register
		 * the rdma buffers with a new key each iteration.
		 */
		cb->fastreg_wr.opcode = IB_WR_FAST_REG_MR;
		cb->fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
		cb->fastreg_wr.wr.fast_reg.length = cb->size;
		cb->fastreg_wr.wr.fast_reg.page_list = cb->page_list;
		cb->fastreg_wr.wr.fast_reg.page_list_len = cb->page_list_len;

		cb->invalidate_wr.next = &cb->fastreg_wr;
		cb->invalidate_wr.opcode = IB_WR_LOCAL_INV;
		break;
	case MW:
		cb->bind_attr.wr_id = 0xabbaabba;
		cb->bind_attr.send_flags = 0; /* unsignaled */
#ifdef BIND_INFO
		cb->bind_attr.bind_info.length = cb->size;
#else
		cb->bind_attr.length = cb->size;
#endif
		break;
	default:
		break;
	}
}
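
/*
 * Sketch of the FASTREG chain prepared above.  krping_rdma_rkey()
 * (below) posts it each iteration; a single ib_post_send() submits
 * both WRs and neither is signaled:
 *
 *	cb->invalidate_wr (IB_WR_LOCAL_INV, old rkey)
 *	    .next --> cb->fastreg_wr (IB_WR_FAST_REG_MR, new rkey)
 *
 *	ib_update_fast_reg_key(cb->fastreg_mr, ++cb->key);
 *	cb->fastreg_wr.wr.fast_reg.rkey = cb->fastreg_mr->rkey;
 *	ret = ib_post_send(cb->qp, &cb->invalidate_wr, &bad_wr);
 */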
"recv_buf reg_mr failed\n"); 623 ret = PTR_ERR(cb->page_list); 624 goto bail; 625 } 626 cb->fastreg_mr = ib_alloc_fast_reg_mr(cb->pd, 627 cb->page_list->max_page_list_len); 628 if (IS_ERR(cb->fastreg_mr)) { 629 DEBUG_LOG(cb, "recv_buf reg_mr failed\n"); 630 ret = PTR_ERR(cb->fastreg_mr); 631 goto bail; 632 } 633 DEBUG_LOG(cb, "fastreg rkey 0x%x page_list %p" 634 " page_list_len %u\n", cb->fastreg_mr->rkey, 635 cb->page_list, cb->page_list_len); 636 break; 637 case MW: 638 cb->mw = ib_alloc_mw(cb->pd,IB_MW_TYPE_1); 639 if (IS_ERR(cb->mw)) { 640 DEBUG_LOG(cb, "recv_buf alloc_mw failed\n"); 641 ret = PTR_ERR(cb->mw); 642 goto bail; 643 } 644 DEBUG_LOG(cb, "mw rkey 0x%x\n", cb->mw->rkey); 645 /*FALLTHROUGH*/ 646 case MR: 647 buf.addr = cb->rdma_dma_addr; 648 buf.size = cb->size; 649 iovbase = cb->rdma_dma_addr; 650 cb->rdma_mr = ib_reg_phys_mr(cb->pd, &buf, 1, 651 IB_ACCESS_LOCAL_WRITE| 652 IB_ACCESS_REMOTE_READ| 653 IB_ACCESS_REMOTE_WRITE, 654 &iovbase); 655 if (IS_ERR(cb->rdma_mr)) { 656 DEBUG_LOG(cb, "rdma_buf reg_mr failed\n"); 657 ret = PTR_ERR(cb->rdma_mr); 658 goto bail; 659 } 660 DEBUG_LOG(cb, "rdma buf dma_addr %jx size %d mr rkey 0x%x\n", 661 (uintmax_t)buf.addr, (int)buf.size, cb->rdma_mr->rkey); 662 break; 663 default: 664 ret = -EINVAL; 665 goto bail; 666 break; 667 } 668 } 669 670 if (!cb->server || cb->wlat || cb->rlat || cb->bw || cb->frtest) { 671 672 cb->start_buf = kmalloc(cb->size, GFP_KERNEL); 673 if (!cb->start_buf) { 674 DEBUG_LOG(cb, "start_buf malloc failed\n"); 675 ret = -ENOMEM; 676 goto bail; 677 } 678 679 cb->start_dma_addr = dma_map_single(cb->pd->device->dma_device, 680 cb->start_buf, cb->size, 681 DMA_BIDIRECTIONAL); 682 pci_unmap_addr_set(cb, start_mapping, cb->start_dma_addr); 683 684 if (cb->mem == MR || cb->mem == MW) { 685 unsigned flags = IB_ACCESS_REMOTE_READ; 686 687 if (cb->wlat || cb->rlat || cb->bw || cb->frtest) { 688 flags |= IB_ACCESS_LOCAL_WRITE | 689 IB_ACCESS_REMOTE_WRITE; 690 } 691 692 buf.addr = cb->start_dma_addr; 693 buf.size = cb->size; 694 DEBUG_LOG(cb, "start buf dma_addr %jx size %d\n", 695 (uintmax_t)buf.addr, (int)buf.size); 696 iovbase = cb->start_dma_addr; 697 cb->start_mr = ib_reg_phys_mr(cb->pd, &buf, 1, 698 flags, 699 &iovbase); 700 701 if (IS_ERR(cb->start_mr)) { 702 DEBUG_LOG(cb, "start_buf reg_mr failed\n"); 703 ret = PTR_ERR(cb->start_mr); 704 goto bail; 705 } 706 } 707 } 708 709 krping_setup_wr(cb); 710 DEBUG_LOG(cb, "allocated & registered buffers...\n"); 711 return 0; 712 bail: 713 if (cb->fastreg_mr && !IS_ERR(cb->fastreg_mr)) 714 ib_dereg_mr(cb->fastreg_mr); 715 if (cb->mw && !IS_ERR(cb->mw)) 716 ib_dealloc_mw(cb->mw); 717 if (cb->rdma_mr && !IS_ERR(cb->rdma_mr)) 718 ib_dereg_mr(cb->rdma_mr); 719 if (cb->page_list && !IS_ERR(cb->page_list)) 720 ib_free_fast_reg_page_list(cb->page_list); 721 if (cb->dma_mr && !IS_ERR(cb->dma_mr)) 722 ib_dereg_mr(cb->dma_mr); 723 if (cb->recv_mr && !IS_ERR(cb->recv_mr)) 724 ib_dereg_mr(cb->recv_mr); 725 if (cb->send_mr && !IS_ERR(cb->send_mr)) 726 ib_dereg_mr(cb->send_mr); 727 if (cb->rdma_buf) 728 kfree(cb->rdma_buf); 729 if (cb->start_buf) 730 kfree(cb->start_buf); 731 return ret; 732 } 733 734 static void krping_free_buffers(struct krping_cb *cb) 735 { 736 DEBUG_LOG(cb, "krping_free_buffers called on cb %p\n", cb); 737 738 if (cb->dma_mr) 739 ib_dereg_mr(cb->dma_mr); 740 if (cb->send_mr) 741 ib_dereg_mr(cb->send_mr); 742 if (cb->recv_mr) 743 ib_dereg_mr(cb->recv_mr); 744 if (cb->rdma_mr) 745 ib_dereg_mr(cb->rdma_mr); 746 if (cb->start_mr) 747 ib_dereg_mr(cb->start_mr); 748 if 

static void krping_free_buffers(struct krping_cb *cb)
{
	DEBUG_LOG(cb, "krping_free_buffers called on cb %p\n", cb);

	if (cb->dma_mr)
		ib_dereg_mr(cb->dma_mr);
	if (cb->send_mr)
		ib_dereg_mr(cb->send_mr);
	if (cb->recv_mr)
		ib_dereg_mr(cb->recv_mr);
	if (cb->rdma_mr)
		ib_dereg_mr(cb->rdma_mr);
	if (cb->start_mr)
		ib_dereg_mr(cb->start_mr);
	if (cb->fastreg_mr)
		ib_dereg_mr(cb->fastreg_mr);
	if (cb->mw)
		ib_dealloc_mw(cb->mw);

	dma_unmap_single(cb->pd->device->dma_device,
			 pci_unmap_addr(cb, recv_mapping),
			 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
	dma_unmap_single(cb->pd->device->dma_device,
			 pci_unmap_addr(cb, send_mapping),
			 sizeof(cb->send_buf), DMA_BIDIRECTIONAL);
	dma_unmap_single(cb->pd->device->dma_device,
			 pci_unmap_addr(cb, rdma_mapping),
			 cb->size, DMA_BIDIRECTIONAL);
	kfree(cb->rdma_buf);
	if (cb->start_buf) {
		dma_unmap_single(cb->pd->device->dma_device,
				 pci_unmap_addr(cb, start_mapping),
				 cb->size, DMA_BIDIRECTIONAL);
		kfree(cb->start_buf);
	}
}

static int krping_create_qp(struct krping_cb *cb)
{
	struct ib_qp_init_attr init_attr;
	int ret;

	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.cap.max_send_wr = cb->txdepth;
	init_attr.cap.max_recv_wr = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.cap.max_send_sge = 1;
	init_attr.qp_type = IB_QPT_RC;
	init_attr.send_cq = cb->cq;
	init_attr.recv_cq = cb->cq;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;

	if (cb->server) {
		ret = rdma_create_qp(cb->child_cm_id, cb->pd, &init_attr);
		if (!ret)
			cb->qp = cb->child_cm_id->qp;
	} else {
		ret = rdma_create_qp(cb->cm_id, cb->pd, &init_attr);
		if (!ret)
			cb->qp = cb->cm_id->qp;
	}

	return ret;
}

static void krping_free_qp(struct krping_cb *cb)
{
	ib_destroy_qp(cb->qp);
	ib_destroy_cq(cb->cq);
	ib_dealloc_pd(cb->pd);
}

static int krping_setup_qp(struct krping_cb *cb, struct rdma_cm_id *cm_id)
{
	int ret;

	cb->pd = ib_alloc_pd(cm_id->device);
	if (IS_ERR(cb->pd)) {
		PRINTF(cb, "ib_alloc_pd failed\n");
		return PTR_ERR(cb->pd);
	}
	DEBUG_LOG(cb, "created pd %p\n", cb->pd);

	strlcpy(cb->stats.name, cb->pd->device->name, sizeof(cb->stats.name));

	cb->cq = ib_create_cq(cm_id->device, krping_cq_event_handler, NULL,
			      cb, cb->txdepth * 2, 0);
	if (IS_ERR(cb->cq)) {
		PRINTF(cb, "ib_create_cq failed\n");
		ret = PTR_ERR(cb->cq);
		goto err1;
	}
	DEBUG_LOG(cb, "created cq %p\n", cb->cq);

	if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest) {
		ret = ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
		if (ret) {
			PRINTF(cb, "ib_req_notify_cq failed\n");
			goto err2;
		}
	}

	ret = krping_create_qp(cb);
	if (ret) {
		PRINTF(cb, "krping_create_qp failed: %d\n", ret);
		goto err2;
	}
	DEBUG_LOG(cb, "created qp %p\n", cb->qp);
	return 0;
err2:
	ib_destroy_cq(cb->cq);
err1:
	ib_dealloc_pd(cb->pd);
	return ret;
}
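
/*
 * Resource lifetime note: krping_setup_qp() above creates the verbs
 * objects in dependency order (PD, then CQ, then a QP attached to that
 * CQ for both send and recv work), and krping_free_qp() destroys them
 * in the reverse order.  The CQ is sized at txdepth * 2, comfortably
 * above the txdepth send slots plus the two recv slots of the QP.
 */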

/*
 * Return the (possibly rebound) rkey for the rdma buffer.
 * FASTREG mode: invalidate and rebind via fastreg wr.
 * MW mode: rebind the MW.
 * other modes: just return the mr rkey.
 */
static u32 krping_rdma_rkey(struct krping_cb *cb, u64 buf, int post_inv)
{
	u32 rkey = 0xffffffff;
	u64 p;
	struct ib_send_wr *bad_wr;
	int i;
	int ret;

	switch (cb->mem) {
	case FASTREG:
		cb->invalidate_wr.ex.invalidate_rkey = cb->fastreg_mr->rkey;

		/*
		 * Update the fastreg key.
		 */
		ib_update_fast_reg_key(cb->fastreg_mr, ++cb->key);
		cb->fastreg_wr.wr.fast_reg.rkey = cb->fastreg_mr->rkey;

		/*
		 * Update the fastreg WR with new buf info.
		 */
		if (buf == (u64)cb->start_dma_addr)
			cb->fastreg_wr.wr.fast_reg.access_flags =
			    IB_ACCESS_REMOTE_READ;
		else
			cb->fastreg_wr.wr.fast_reg.access_flags =
			    IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
		cb->fastreg_wr.wr.fast_reg.iova_start = buf;
		p = (u64)(buf & PAGE_MASK);
		for (i = 0; i < cb->fastreg_wr.wr.fast_reg.page_list_len;
		     i++, p += PAGE_SIZE) {
			cb->page_list->page_list[i] = p;
			DEBUG_LOG(cb, "page_list[%d] 0x%jx\n", i, (uintmax_t)p);
		}

		DEBUG_LOG(cb, "post_inv = %d, fastreg new rkey 0x%x shift %u len %u"
			  " iova_start %jx page_list_len %u\n",
			  post_inv,
			  cb->fastreg_wr.wr.fast_reg.rkey,
			  cb->fastreg_wr.wr.fast_reg.page_shift,
			  (unsigned)cb->fastreg_wr.wr.fast_reg.length,
			  (uintmax_t)cb->fastreg_wr.wr.fast_reg.iova_start,
			  cb->fastreg_wr.wr.fast_reg.page_list_len);

		if (post_inv)
			ret = ib_post_send(cb->qp, &cb->invalidate_wr, &bad_wr);
		else
			ret = ib_post_send(cb->qp, &cb->fastreg_wr, &bad_wr);
		if (ret) {
			PRINTF(cb, "post send error %d\n", ret);
			cb->state = ERROR;
		}
		rkey = cb->fastreg_mr->rkey;
		break;
	case MW:
		/*
		 * Update the MW with new buf info.
		 */
		if (buf == (u64)cb->start_dma_addr) {
#ifdef BIND_INFO
			cb->bind_attr.bind_info.mw_access_flags = IB_ACCESS_REMOTE_READ;
			cb->bind_attr.bind_info.mr = cb->start_mr;
#else
			cb->bind_attr.mw_access_flags = IB_ACCESS_REMOTE_READ;
			cb->bind_attr.mr = cb->start_mr;
#endif
		} else {
#ifdef BIND_INFO
			cb->bind_attr.bind_info.mw_access_flags = IB_ACCESS_REMOTE_WRITE;
			cb->bind_attr.bind_info.mr = cb->rdma_mr;
#else
			cb->bind_attr.mw_access_flags = IB_ACCESS_REMOTE_WRITE;
			cb->bind_attr.mr = cb->rdma_mr;
#endif
		}
#ifdef BIND_INFO
		cb->bind_attr.bind_info.addr = buf;
#else
		cb->bind_attr.addr = buf;
#endif
		DEBUG_LOG(cb, "binding mw rkey 0x%x to buf %jx mr rkey 0x%x\n",
#ifdef BIND_INFO
			  cb->mw->rkey, (uintmax_t)buf, cb->bind_attr.bind_info.mr->rkey);
#else
			  cb->mw->rkey, (uintmax_t)buf, cb->bind_attr.mr->rkey);
#endif
		ret = ib_bind_mw(cb->qp, cb->mw, &cb->bind_attr);
		if (ret) {
			PRINTF(cb, "bind mw error %d\n", ret);
			cb->state = ERROR;
		} else
			rkey = cb->mw->rkey;
		break;
	case MR:
		if (buf == (u64)cb->start_dma_addr)
			rkey = cb->start_mr->rkey;
		else
			rkey = cb->rdma_mr->rkey;
		break;
	case DMA:
		rkey = cb->dma_mr->rkey;
		break;
	default:
		PRINTF(cb, "%s:%d case ERROR\n", __func__, __LINE__);
		cb->state = ERROR;
		break;
	}
	return rkey;
}
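
/*
 * krping_rdma_rkey() serves both halves of the exchange: the client
 * rebinds and returns the rkey it advertises to the peer (see
 * krping_format_send() below), while the server calls it with its own
 * rdma_dma_addr to refresh the key it then uses as the lkey of the
 * local SGE for the RDMA read/write (see krping_test_server()).
 */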

static void krping_format_send(struct krping_cb *cb, u64 buf)
{
	struct krping_rdma_info *info = &cb->send_buf;
	u32 rkey;

	/*
	 * Client side will do fastreg or mw bind before
	 * advertising the rdma buffer.  Server side
	 * sends have no data.
	 */
	if (!cb->server || cb->wlat || cb->rlat || cb->bw || cb->frtest) {
		rkey = krping_rdma_rkey(cb, buf, !cb->server_invalidate);
		info->buf = htonll(buf);
		info->rkey = htonl(rkey);
		info->size = htonl(cb->size);
		DEBUG_LOG(cb, "RDMA addr %llx rkey %x len %d\n",
			  (unsigned long long)buf, rkey, cb->size);
	}
}
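
/*
 * The advertisement built above is the 16-byte krping_rdma_info struct
 * in network byte order:
 *
 *	bytes  0-7	buf	u64 buffer address	(htonll)
 *	bytes  8-11	rkey	u32 steering tag	(htonl)
 *	bytes 12-15	size	u32 buffer length	(htonl)
 *
 * server_recv() and client_recv() reject any recv completion whose
 * byte_len differs from sizeof(struct krping_rdma_info).
 */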

static void krping_test_server(struct krping_cb *cb)
{
	struct ib_send_wr *bad_wr, inv;
	int ret;

	while (1) {
		/* Wait for client's Start STAG/TO/Len */
		wait_event_interruptible(cb->sem, cb->state >= RDMA_READ_ADV);
		if (cb->state != RDMA_READ_ADV) {
			PRINTF(cb, "wait for RDMA_READ_ADV state %d\n",
			       cb->state);
			break;
		}

		DEBUG_LOG(cb, "server received source adv\n");

		cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
		cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
		cb->rdma_sq_wr.sg_list->length = cb->remote_len;
		cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, 1);

		/* Issue RDMA Read. */
		if (cb->read_inv)
			cb->rdma_sq_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
		else {

			cb->rdma_sq_wr.opcode = IB_WR_RDMA_READ;
			if (cb->mem == FASTREG) {
				/*
				 * Immediately follow the read with a
				 * fenced LOCAL_INV.
				 */
				cb->rdma_sq_wr.next = &inv;
				memset(&inv, 0, sizeof inv);
				inv.opcode = IB_WR_LOCAL_INV;
				inv.ex.invalidate_rkey = cb->fastreg_mr->rkey;
				inv.send_flags = IB_SEND_FENCE;
			}
		}

		ret = ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr);
		if (ret) {
			PRINTF(cb, "post send error %d\n", ret);
			break;
		}
		cb->rdma_sq_wr.next = NULL;

		DEBUG_LOG(cb, "server posted rdma read req\n");

		/* Wait for read completion */
		wait_event_interruptible(cb->sem,
					 cb->state >= RDMA_READ_COMPLETE);
		if (cb->state != RDMA_READ_COMPLETE) {
			PRINTF(cb,
			       "wait for RDMA_READ_COMPLETE state %d\n",
			       cb->state);
			break;
		}
		DEBUG_LOG(cb, "server received read complete\n");

		/* Display data in the rdma buffer */
		if (cb->verbose) {
			if (strlen(cb->rdma_buf) > 128) {
				char msgbuf[128];

				strlcpy(msgbuf, cb->rdma_buf, sizeof(msgbuf));
				PRINTF(cb, "server ping data stripped: %s\n",
				       msgbuf);
			} else
				PRINTF(cb, "server ping data: %s\n",
				       cb->rdma_buf);
		}

		/* Tell client to continue */
		if (cb->server && cb->server_invalidate) {
			cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
			cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
			DEBUG_LOG(cb, "send-w-inv rkey 0x%x\n", cb->remote_rkey);
		}
		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
		if (ret) {
			PRINTF(cb, "post send error %d\n", ret);
			break;
		}
		DEBUG_LOG(cb, "server posted go ahead\n");

		/* Wait for client's RDMA STAG/TO/Len */
		wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
		if (cb->state != RDMA_WRITE_ADV) {
			PRINTF(cb,
			       "wait for RDMA_WRITE_ADV state %d\n",
			       cb->state);
			break;
		}
		DEBUG_LOG(cb, "server received sink adv\n");

		/* RDMA Write echo data */
		cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
		cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
		cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
		cb->rdma_sq_wr.sg_list->length = strlen(cb->rdma_buf) + 1;
		if (cb->local_dma_lkey)
			cb->rdma_sgl.lkey = cb->qp->device->local_dma_lkey;
		else
			cb->rdma_sgl.lkey = krping_rdma_rkey(cb,
			    cb->rdma_dma_addr, 0);

		DEBUG_LOG(cb, "rdma write from lkey %x laddr %llx len %d\n",
			  cb->rdma_sq_wr.sg_list->lkey,
			  (unsigned long long)cb->rdma_sq_wr.sg_list->addr,
			  cb->rdma_sq_wr.sg_list->length);

		ret = ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr);
		if (ret) {
			PRINTF(cb, "post send error %d\n", ret);
			break;
		}

		/* Wait for completion */
		wait_event_interruptible(cb->sem, cb->state >=
					 RDMA_WRITE_COMPLETE);
		if (cb->state != RDMA_WRITE_COMPLETE) {
			PRINTF(cb,
			       "wait for RDMA_WRITE_COMPLETE state %d\n",
			       cb->state);
			break;
		}
		DEBUG_LOG(cb, "server rdma write complete\n");

		cb->state = CONNECTED;

		/* Tell client to begin again */
		if (cb->server && cb->server_invalidate) {
			cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
			cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
			DEBUG_LOG(cb, "send-w-inv rkey 0x%x\n", cb->remote_rkey);
		}
		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
		if (ret) {
			PRINTF(cb, "post send error %d\n", ret);
			break;
		}
		DEBUG_LOG(cb, "server posted go ahead\n");
	}
}
"server" : "client"); 1188 PRINTF(cb, "Failed status %d: wr_id %d\n", 1189 wc.status, (int) wc.wr_id); 1190 return; 1191 } 1192 ++scnt; 1193 } 1194 microtime(&stop_tv); 1195 1196 if (stop_tv.tv_usec < start_tv.tv_usec) { 1197 stop_tv.tv_usec += 1000000; 1198 stop_tv.tv_sec -= 1; 1199 } 1200 1201 PRINTF(cb, "delta sec %lu delta usec %lu iter %d size %d\n", 1202 (unsigned long)(stop_tv.tv_sec - start_tv.tv_sec), 1203 (unsigned long)(stop_tv.tv_usec - start_tv.tv_usec), 1204 scnt, cb->size); 1205 } 1206 1207 static void wlat_test(struct krping_cb *cb) 1208 { 1209 int ccnt, scnt, rcnt; 1210 int iters=cb->count; 1211 volatile char *poll_buf = (char *) cb->start_buf; 1212 char *buf = (char *)cb->rdma_buf; 1213 struct timeval start_tv, stop_tv; 1214 cycles_t *post_cycles_start, *post_cycles_stop; 1215 cycles_t *poll_cycles_start, *poll_cycles_stop; 1216 cycles_t *last_poll_cycles_start; 1217 cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0; 1218 int i; 1219 int cycle_iters = 1000; 1220 1221 ccnt = 0; 1222 scnt = 0; 1223 rcnt = 0; 1224 1225 post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL); 1226 if (!post_cycles_start) { 1227 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__); 1228 return; 1229 } 1230 post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL); 1231 if (!post_cycles_stop) { 1232 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__); 1233 return; 1234 } 1235 poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL); 1236 if (!poll_cycles_start) { 1237 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__); 1238 return; 1239 } 1240 poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL); 1241 if (!poll_cycles_stop) { 1242 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__); 1243 return; 1244 } 1245 last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), 1246 GFP_KERNEL); 1247 if (!last_poll_cycles_start) { 1248 PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__); 1249 return; 1250 } 1251 cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE; 1252 cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey; 1253 cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr; 1254 cb->rdma_sq_wr.sg_list->length = cb->size; 1255 1256 if (cycle_iters > iters) 1257 cycle_iters = iters; 1258 microtime(&start_tv); 1259 while (scnt < iters || ccnt < iters || rcnt < iters) { 1260 1261 /* Wait till buffer changes. 

static void wlat_test(struct krping_cb *cb)
{
	int ccnt, scnt, rcnt;
	int iters = cb->count;
	volatile char *poll_buf = (char *) cb->start_buf;
	char *buf = (char *)cb->rdma_buf;
	struct timeval start_tv, stop_tv;
	cycles_t *post_cycles_start = NULL, *post_cycles_stop = NULL;
	cycles_t *poll_cycles_start = NULL, *poll_cycles_stop = NULL;
	cycles_t *last_poll_cycles_start = NULL;
	cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
	int i;
	int cycle_iters = 1000;

	ccnt = 0;
	scnt = 0;
	rcnt = 0;

	post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
	if (!post_cycles_start) {
		PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
		goto out;
	}
	post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
	if (!post_cycles_stop) {
		PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
		goto out;
	}
	poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
	if (!poll_cycles_start) {
		PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
		goto out;
	}
	poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
	if (!poll_cycles_stop) {
		PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
		goto out;
	}
	last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
					 GFP_KERNEL);
	if (!last_poll_cycles_start) {
		PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
		goto out;
	}
	cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
	cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
	cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
	cb->rdma_sq_wr.sg_list->length = cb->size;

	if (cycle_iters > iters)
		cycle_iters = iters;
	microtime(&start_tv);
	while (scnt < iters || ccnt < iters || rcnt < iters) {

		/* Wait till buffer changes. */
		if (rcnt < iters && !(scnt < 1 && !cb->server)) {
			++rcnt;
			while (*poll_buf != (char)rcnt) {
				if (cb->state == ERROR) {
					PRINTF(cb, "state = ERROR, bailing\n");
					goto out;
				}
			}
		}

		if (scnt < iters) {
			struct ib_send_wr *bad_wr;

			*buf = (char)scnt + 1;
			if (scnt < cycle_iters)
				post_cycles_start[scnt] = get_cycles();
			if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
				PRINTF(cb, "Couldn't post send: scnt=%d\n",
				       scnt);
				goto out;
			}
			if (scnt < cycle_iters)
				post_cycles_stop[scnt] = get_cycles();
			scnt++;
		}

		if (ccnt < iters) {
			struct ib_wc wc;
			int ne;

			if (ccnt < cycle_iters)
				poll_cycles_start[ccnt] = get_cycles();
			do {
				if (ccnt < cycle_iters)
					last_poll_cycles_start[ccnt] =
						get_cycles();
				ne = ib_poll_cq(cb->cq, 1, &wc);
			} while (ne == 0);
			if (ccnt < cycle_iters)
				poll_cycles_stop[ccnt] = get_cycles();
			++ccnt;

			if (ne < 0) {
				PRINTF(cb, "poll CQ failed %d\n", ne);
				goto out;
			}
			if (wc.status != IB_WC_SUCCESS) {
				PRINTF(cb, "Completion with error at %s:\n",
				       cb->server ? "server" : "client");
				PRINTF(cb, "Failed status %d: wr_id %d\n",
				       wc.status, (int) wc.wr_id);
				PRINTF(cb, "scnt=%d, rcnt=%d, ccnt=%d\n",
				       scnt, rcnt, ccnt);
				goto out;
			}
		}
	}
	microtime(&stop_tv);

	if (stop_tv.tv_usec < start_tv.tv_usec) {
		stop_tv.tv_usec += 1000000;
		stop_tv.tv_sec -= 1;
	}

	for (i = 0; i < cycle_iters; i++) {
		sum_post += post_cycles_stop[i] - post_cycles_start[i];
		sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
		sum_last_poll += poll_cycles_stop[i] - last_poll_cycles_start[i];
	}
	PRINTF(cb,
	       "delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
	       " sum_post %llu sum_poll %llu sum_last_poll %llu\n",
	       (unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
	       (unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
	       scnt, cb->size, cycle_iters,
	       (unsigned long long)sum_post, (unsigned long long)sum_poll,
	       (unsigned long long)sum_last_poll);
out:
	kfree(post_cycles_start);
	kfree(post_cycles_stop);
	kfree(poll_cycles_start);
	kfree(poll_cycles_stop);
	kfree(last_poll_cycles_start);
}

static void bw_test(struct krping_cb *cb)
{
	int ccnt, scnt, rcnt;
	int iters = cb->count;
	struct timeval start_tv, stop_tv;
	cycles_t *post_cycles_start = NULL, *post_cycles_stop = NULL;
	cycles_t *poll_cycles_start = NULL, *poll_cycles_stop = NULL;
	cycles_t *last_poll_cycles_start = NULL;
	cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
	int i;
	int cycle_iters = 1000;

	ccnt = 0;
	scnt = 0;
	rcnt = 0;

	post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
	if (!post_cycles_start) {
		PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
		goto out;
	}
	post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
	if (!post_cycles_stop) {
		PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
		goto out;
	}
	poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
	if (!poll_cycles_start) {
		PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
		goto out;
	}
	poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
	if (!poll_cycles_stop) {
		PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
		goto out;
	}
	last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
					 GFP_KERNEL);
	if (!last_poll_cycles_start) {
		PRINTF(cb, "%s kmalloc failed\n", __FUNCTION__);
		goto out;
	}
	cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
	cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
	cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
	cb->rdma_sq_wr.sg_list->length = cb->size;

	if (cycle_iters > iters)
		cycle_iters = iters;
	microtime(&start_tv);
	while (scnt < iters || ccnt < iters) {

		while (scnt < iters && scnt - ccnt < cb->txdepth) {
			struct ib_send_wr *bad_wr;

			if (scnt < cycle_iters)
				post_cycles_start[scnt] = get_cycles();
			if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
				PRINTF(cb, "Couldn't post send: scnt=%d\n",
				       scnt);
				goto out;
			}
			if (scnt < cycle_iters)
				post_cycles_stop[scnt] = get_cycles();
			++scnt;
		}

		if (ccnt < iters) {
			int ne;
			struct ib_wc wc;

			if (ccnt < cycle_iters)
				poll_cycles_start[ccnt] = get_cycles();
			do {
				if (ccnt < cycle_iters)
					last_poll_cycles_start[ccnt] =
						get_cycles();
				ne = ib_poll_cq(cb->cq, 1, &wc);
			} while (ne == 0);
			if (ccnt < cycle_iters)
				poll_cycles_stop[ccnt] = get_cycles();
			ccnt += 1;

			if (ne < 0) {
				PRINTF(cb, "poll CQ failed %d\n", ne);
				goto out;
			}
			if (wc.status != IB_WC_SUCCESS) {
				PRINTF(cb, "Completion with error at %s:\n",
				       cb->server ? "server" : "client");
				PRINTF(cb, "Failed status %d: wr_id %d\n",
				       wc.status, (int) wc.wr_id);
				goto out;
			}
		}
	}
	microtime(&stop_tv);

	if (stop_tv.tv_usec < start_tv.tv_usec) {
		stop_tv.tv_usec += 1000000;
		stop_tv.tv_sec -= 1;
	}

	for (i = 0; i < cycle_iters; i++) {
		sum_post += post_cycles_stop[i] - post_cycles_start[i];
		sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
		sum_last_poll += poll_cycles_stop[i] - last_poll_cycles_start[i];
	}
	PRINTF(cb,
	       "delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
	       " sum_post %llu sum_poll %llu sum_last_poll %llu\n",
	       (unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
	       (unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
	       scnt, cb->size, cycle_iters,
	       (unsigned long long)sum_post, (unsigned long long)sum_poll,
	       (unsigned long long)sum_last_poll);
out:
	kfree(post_cycles_start);
	kfree(post_cycles_stop);
	kfree(poll_cycles_start);
	kfree(poll_cycles_stop);
	kfree(last_poll_cycles_start);
}

static void krping_rlat_test_server(struct krping_cb *cb)
{
	struct ib_send_wr *bad_wr;
	struct ib_wc wc;
	int ret;

	/* Spin waiting for client's Start STAG/TO/Len */
	while (cb->state < RDMA_READ_ADV) {
		krping_cq_event_handler(cb->cq, cb);
	}

	/* Send STAG/TO/Len to client */
	krping_format_send(cb, cb->start_dma_addr);
	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
	if (ret) {
		PRINTF(cb, "post send error %d\n", ret);
		return;
	}

	/* Spin waiting for send completion */
	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0)
		;
	if (ret < 0) {
		PRINTF(cb, "poll error %d\n", ret);
		return;
	}
	if (wc.status) {
		PRINTF(cb, "send completion error %d\n", wc.status);
		return;
	}
	wait_event_interruptible(cb->sem, cb->state == ERROR);
}

static void krping_wlat_test_server(struct krping_cb *cb)
{
	struct ib_send_wr *bad_wr;
	struct ib_wc wc;
	int ret;

	/* Spin waiting for client's Start STAG/TO/Len */
	while (cb->state < RDMA_READ_ADV) {
		krping_cq_event_handler(cb->cq, cb);
	}

	/* Send STAG/TO/Len to client */
	krping_format_send(cb, cb->start_dma_addr);
	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
	if (ret) {
		PRINTF(cb, "post send error %d\n", ret);
		return;
	}

	/* Spin waiting for send completion */
	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0)
		;
	if (ret < 0) {
		PRINTF(cb, "poll error %d\n", ret);
		return;
	}
	if (wc.status) {
		PRINTF(cb, "send completion error %d\n", wc.status);
		return;
	}

	wlat_test(cb);
	wait_event_interruptible(cb->sem, cb->state == ERROR);
}

static void krping_bw_test_server(struct krping_cb *cb)
{
	struct ib_send_wr *bad_wr;
	struct ib_wc wc;
	int ret;

	/* Spin waiting for client's Start STAG/TO/Len */
	while (cb->state < RDMA_READ_ADV) {
		krping_cq_event_handler(cb->cq, cb);
	}

	/* Send STAG/TO/Len to client */
	krping_format_send(cb, cb->start_dma_addr);
	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
	if (ret) {
		PRINTF(cb, "post send error %d\n", ret);
		return;
	}

	/* Spin waiting for send completion */
	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0)
		;
	if (ret < 0) {
		PRINTF(cb, "poll error %d\n", ret);
		return;
	}
	if (wc.status) {
		PRINTF(cb, "send completion error %d\n", wc.status);
		return;
	}

	if (cb->duplex)
		bw_test(cb);
	wait_event_interruptible(cb->sem, cb->state == ERROR);
}

static int fastreg_supported(struct krping_cb *cb, int server)
{
	struct ib_device *dev = server ? cb->child_cm_id->device :
					 cb->cm_id->device;
	struct ib_device_attr attr;
	int ret;

	ret = ib_query_device(dev, &attr);
	if (ret) {
		PRINTF(cb, "ib_query_device failed ret %d\n", ret);
		return 0;
	}
	if (!(attr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
		PRINTF(cb, "Fastreg not supported - device_cap_flags 0x%llx\n",
		       (unsigned long long)attr.device_cap_flags);
		return 0;
	}
	DEBUG_LOG(cb, "Fastreg supported - device_cap_flags 0x%jx\n",
		  (uintmax_t)attr.device_cap_flags);
	return 1;
}

static int krping_bind_server(struct krping_cb *cb)
{
	struct sockaddr_in sin;
	int ret;

	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof sin;
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = cb->addr.s_addr;
	sin.sin_port = cb->port;

	ret = rdma_bind_addr(cb->cm_id, (struct sockaddr *) &sin);
	if (ret) {
		PRINTF(cb, "rdma_bind_addr error %d\n", ret);
		return ret;
	}
	DEBUG_LOG(cb, "rdma_bind_addr successful\n");

	DEBUG_LOG(cb, "rdma_listen\n");
	ret = rdma_listen(cb->cm_id, 3);
	if (ret) {
		PRINTF(cb, "rdma_listen failed: %d\n", ret);
		return ret;
	}

	wait_event_interruptible(cb->sem, cb->state >= CONNECT_REQUEST);
	if (cb->state != CONNECT_REQUEST) {
		PRINTF(cb, "wait for CONNECT_REQUEST state %d\n",
		       cb->state);
		return -1;
	}

	if (cb->mem == FASTREG && !fastreg_supported(cb, 1))
		return -EINVAL;

	return 0;
}

/*
 * sq-depth worth of fastreg + 0B read-inv pairs, reposting them as the
 * reads complete.
 * NOTE: every 9 seconds we sleep for 1 second to keep the kernel happy.
 */
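
/*
 * Per-slot WR chain used by krping_fr_test5() (depth = txdepth / 2
 * slots are kept in flight):
 *
 *	fr[i] (IB_WR_FAST_REG_MR, unsignaled)
 *	    .next --> read[i] (IB_WR_RDMA_READ_WITH_INV, signaled)
 *
 * Each signaled read completion also invalidates the slot's MR, so the
 * completion path rekeys the MR with ib_update_fast_reg_key() and
 * reposts the whole chain.
 */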
static void krping_fr_test5(struct krping_cb *cb)
{
	struct ib_fast_reg_page_list **pl;
	struct ib_send_wr *fr, *read, *bad;
	struct ib_wc wc;
	struct ib_sge *sgl;
	u8 key = 0;
	struct ib_mr **mr;
	u8 **buf;
	dma_addr_t *dma_addr;
	int i;
	int ret;
	int plen = (((cb->size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
	time_t start;
	int count = 0;
	int scnt;
	int depth = cb->txdepth >> 1;

	if (!depth) {
		PRINTF(cb, "txdepth must be > 1 for this test!\n");
		return;
	}

	pl = kzalloc(sizeof *pl * depth, GFP_KERNEL);
	DEBUG_LOG(cb, "%s pl %p size %zu\n", __func__, pl, sizeof *pl * depth);
	mr = kzalloc(sizeof *mr * depth, GFP_KERNEL);
	DEBUG_LOG(cb, "%s mr %p size %zu\n", __func__, mr, sizeof *mr * depth);
	fr = kzalloc(sizeof *fr * depth, GFP_KERNEL);
	DEBUG_LOG(cb, "%s fr %p size %zu\n", __func__, fr, sizeof *fr * depth);
	sgl = kzalloc(sizeof *sgl * depth, GFP_KERNEL);
	DEBUG_LOG(cb, "%s sgl %p size %zu\n", __func__, sgl, sizeof *sgl * depth);
	read = kzalloc(sizeof *read * depth, GFP_KERNEL);
	DEBUG_LOG(cb, "%s read %p size %zu\n", __func__, read, sizeof *read * depth);
	buf = kzalloc(sizeof *buf * depth, GFP_KERNEL);
	DEBUG_LOG(cb, "%s buf %p size %zu\n", __func__, buf, sizeof *buf * depth);
	dma_addr = kzalloc(sizeof *dma_addr * depth, GFP_KERNEL);
	DEBUG_LOG(cb, "%s dma_addr %p size %zu\n", __func__, dma_addr, sizeof *dma_addr * depth);
	if (!pl || !mr || !fr || !read || !sgl || !buf || !dma_addr) {
		PRINTF(cb, "kzalloc failed\n");
		goto err1;
	}

	for (scnt = 0; scnt < depth; scnt++) {
		pl[scnt] = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
		if (IS_ERR(pl[scnt])) {
			PRINTF(cb, "alloc_fr_page_list failed %ld\n",
			       PTR_ERR(pl[scnt]));
			pl[scnt] = NULL;
			goto err2;
		}
		DEBUG_LOG(cb, "%s pl[%u] %p\n", __func__, scnt, pl[scnt]);

		mr[scnt] = ib_alloc_fast_reg_mr(cb->pd, plen);
		if (IS_ERR(mr[scnt])) {
			PRINTF(cb, "alloc_fr failed %ld\n",
			       PTR_ERR(mr[scnt]));
			mr[scnt] = NULL;
			goto err2;
		}
		DEBUG_LOG(cb, "%s mr[%u] %p\n", __func__, scnt, mr[scnt]);
		ib_update_fast_reg_key(mr[scnt], ++key);

		buf[scnt] = kmalloc(cb->size, GFP_KERNEL);
		if (!buf[scnt]) {
			PRINTF(cb, "kmalloc failed\n");
			ret = -ENOMEM;
			goto err2;
		}
		DEBUG_LOG(cb, "%s buf[%u] %p\n", __func__, scnt, buf[scnt]);
		dma_addr[scnt] = dma_map_single(cb->pd->device->dma_device,
						buf[scnt], cb->size,
						DMA_BIDIRECTIONAL);
		if (dma_mapping_error(cb->pd->device->dma_device,
				      dma_addr[scnt])) {
			PRINTF(cb, "dma_map failed\n");
			ret = -ENOMEM;
			goto err2;
		}
		DEBUG_LOG(cb, "%s dma_addr[%u] %p\n", __func__, scnt,
			  (void *)dma_addr[scnt]);
		for (i = 0; i < plen; i++) {
			pl[scnt]->page_list[i] =
			    ((unsigned long)dma_addr[scnt] & PAGE_MASK) +
			    (i * PAGE_SIZE);
			DEBUG_LOG(cb, "%s pl[%u]->page_list[%u] 0x%jx\n",
				  __func__, scnt, i,
				  (uintmax_t)pl[scnt]->page_list[i]);
		}

		sgl[scnt].lkey = mr[scnt]->rkey;
		sgl[scnt].length = cb->size;
		sgl[scnt].addr = (u64)buf[scnt];
		DEBUG_LOG(cb, "%s sgl[%u].lkey 0x%x length %u addr 0x%jx\n",
			  __func__, scnt, sgl[scnt].lkey, sgl[scnt].length,
			  (uintmax_t)sgl[scnt].addr);

		fr[scnt].opcode = IB_WR_FAST_REG_MR;
		fr[scnt].wr_id = scnt;
		fr[scnt].send_flags = 0;
		fr[scnt].wr.fast_reg.page_shift = PAGE_SHIFT;
		fr[scnt].wr.fast_reg.length = cb->size;
		fr[scnt].wr.fast_reg.page_list = pl[scnt];
		fr[scnt].wr.fast_reg.page_list_len = plen;
		fr[scnt].wr.fast_reg.iova_start = (u64)buf[scnt];
		fr[scnt].wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE |
						    IB_ACCESS_LOCAL_WRITE;
		fr[scnt].wr.fast_reg.rkey = mr[scnt]->rkey;
		fr[scnt].next = &read[scnt];
		read[scnt].opcode = IB_WR_RDMA_READ_WITH_INV;
		read[scnt].wr_id = scnt;
		read[scnt].send_flags = IB_SEND_SIGNALED;
		read[scnt].wr.rdma.rkey = cb->remote_rkey;
		read[scnt].wr.rdma.remote_addr = cb->remote_addr;
		read[scnt].num_sge = 1;
		read[scnt].sg_list = &sgl[scnt];
		ret = ib_post_send(cb->qp, &fr[scnt], &bad);
		if (ret) {
			PRINTF(cb, "ib_post_send failed %d\n", ret);
			goto err2;
		}
	}

	start = time_uptime;
	DEBUG_LOG(cb, "%s starting IO.\n", __func__);
	while (!cb->count || cb->server || count < cb->count) {
		if ((time_uptime - start) >= 9) {
			DEBUG_LOG(cb, "%s pausing 1 tick! count %u\n", __func__,
				  count);
			wait_event_interruptible_timeout(cb->sem,
							 cb->state == ERROR,
							 1);
			if (cb->state == ERROR)
				break;
			start = time_uptime;
		}
		do {
			ret = ib_poll_cq(cb->cq, 1, &wc);
			if (ret < 0) {
				PRINTF(cb, "ib_poll_cq failed %d\n",
				       ret);
				goto err2;
			}
			if (ret == 1) {
				if (wc.status) {
					PRINTF(cb,
					       "completion error %u wr_id %ju "
					       "opcode %d\n", wc.status,
					       (uintmax_t)wc.wr_id, wc.opcode);
					goto err2;
				}
				count++;
				if (count == cb->count)
					break;
				ib_update_fast_reg_key(mr[wc.wr_id], ++key);
				fr[wc.wr_id].wr.fast_reg.rkey =
					mr[wc.wr_id]->rkey;
				sgl[wc.wr_id].lkey = mr[wc.wr_id]->rkey;
				ret = ib_post_send(cb->qp, &fr[wc.wr_id], &bad);
				if (ret) {
					PRINTF(cb,
					       "ib_post_send failed %d\n", ret);
					goto err2;
				}
			} else if (krping_sigpending()) {
				PRINTF(cb, "signal!\n");
				goto err2;
			}
		} while (ret == 1);
	}
	DEBUG_LOG(cb, "%s done!\n", __func__);
err2:
	DEBUG_LOG(cb, "sleeping 1 second\n");
	wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
	DEBUG_LOG(cb, "draining the cq...\n");
	do {
		ret = ib_poll_cq(cb->cq, 1, &wc);
		if (ret < 0) {
			PRINTF(cb, "ib_poll_cq failed %d\n", ret);
			break;
		}
		if (ret == 1) {
			if (wc.status) {
				PRINTF(cb, "completion error %u "
				       "opcode %u\n", wc.status, wc.opcode);
			}
		}
	} while (ret == 1);

	DEBUG_LOG(cb, "destroying fr mrs!\n");
	for (scnt = 0; scnt < depth; scnt++) {
		if (mr[scnt]) {
			ib_dereg_mr(mr[scnt]);
			DEBUG_LOG(cb, "%s dereg mr %p\n", __func__, mr[scnt]);
		}
	}
	DEBUG_LOG(cb, "unmapping/freeing bufs!\n");
	for (scnt = 0; scnt < depth; scnt++) {
		if (buf[scnt]) {
			dma_unmap_single(cb->pd->device->dma_device,
					 dma_addr[scnt], cb->size,
					 DMA_BIDIRECTIONAL);
			kfree(buf[scnt]);
			DEBUG_LOG(cb, "%s unmap/free buf %p dma_addr %p\n",
				  __func__, buf[scnt], (void *)dma_addr[scnt]);
		}
	}
	DEBUG_LOG(cb, "destroying fr page lists!\n");
	for (scnt = 0; scnt < depth; scnt++) {
		if (pl[scnt]) {
			DEBUG_LOG(cb, "%s free pl %p\n", __func__, pl[scnt]);
			ib_free_fast_reg_page_list(pl[scnt]);
		}
	}
err1:
	if (pl)
		kfree(pl);
	if (mr)
		kfree(mr);
	if (fr)
		kfree(fr);
	if (read)
		kfree(read);
	if (sgl)
		kfree(sgl);
	if (buf)
		kfree(buf);
	if (dma_addr)
		kfree(dma_addr);
}

static void krping_fr_test_server(struct krping_cb *cb)
{
	DEBUG_LOG(cb, "%s waiting for disconnect...\n", __func__);
	wait_event_interruptible(cb->sem, cb->state == ERROR);
}

static void krping_fr_test5_server(struct krping_cb *cb)
{
	struct ib_send_wr *bad_wr;
	struct ib_wc wc;
	int ret;

	/* Spin waiting for client's Start STAG/TO/Len */
	while (cb->state < RDMA_READ_ADV) {
		krping_cq_event_handler(cb->cq, cb);
	}
	DEBUG_LOG(cb, "%s client STAG %x TO 0x%jx\n", __func__,
		  cb->remote_rkey, (uintmax_t)cb->remote_addr);

	/* Send STAG/TO/Len to client */
	krping_format_send(cb, cb->start_dma_addr);
	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
	if (ret) {
		PRINTF(cb, "post send error %d\n", ret);
		return;
	}

	/* Spin waiting for send completion */
	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0)
		;
	if (ret < 0) {
		PRINTF(cb, "poll error %d\n", ret);
		return;
	}
	if (wc.status) {
		PRINTF(cb, "send completion error %d\n", wc.status);
		return;
	}

	if (cb->duplex)
		krping_fr_test5(cb);
	DEBUG_LOG(cb, "%s waiting for disconnect...\n", __func__);
	wait_event_interruptible(cb->sem, cb->state == ERROR);
}

static void krping_fr_test5_client(struct krping_cb *cb)
{
	struct ib_send_wr *bad;
	struct ib_wc wc;
	int ret;

	cb->state = RDMA_READ_ADV;

	/* Send STAG/TO/Len to server */
	krping_format_send(cb, cb->start_dma_addr);
	if (cb->state == ERROR) {
		PRINTF(cb, "krping_format_send failed\n");
		return;
	}
	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad);
	if (ret) {
		PRINTF(cb, "post send error %d\n", ret);
		return;
	}

	/* Spin waiting for send completion */
	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0)
		;
	if (ret < 0) {
		PRINTF(cb, "poll error %d\n", ret);
		return;
	}
	if (wc.status) {
		PRINTF(cb, "send completion error %d\n", wc.status);
		return;
	}

	/* Spin waiting for server's Start STAG/TO/Len */
	while (cb->state < RDMA_WRITE_ADV) {
		krping_cq_event_handler(cb->cq, cb);
	}
	DEBUG_LOG(cb, "%s server STAG %x TO 0x%jx\n", __func__, cb->remote_rkey,
		  (uintmax_t)cb->remote_addr);

	return krping_fr_test5(cb);
}

/*
 * sq-depth worth of write + fastreg + inv, reposting them as the invs
 * complete.
 * NOTE: every 9 seconds we sleep for 1 second to keep the kernel happy.
 * If a count is given, then the last IO will have a bogus lkey in the
 * write work request.  This reproduces a fw bug where the connection
 * will get stuck if a fastreg is processed while the ulptx is failing
 * the bad write.
 */
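
/*
 * Per-slot WR chain used by krping_fr_test6() (depth = txdepth / 3
 * slots are kept in flight):
 *
 *	write[i] (IB_WR_RDMA_WRITE)
 *	    .next --> fr[i] (IB_WR_FAST_REG_MR)
 *	        .next --> inv[i] (IB_WR_LOCAL_INV, signaled)
 *
 * Only the trailing invalidate is signaled; its completion triggers a
 * rekey and a repost of the chain.  With a count set, once count
 * reaches cb->count - 1 the write's lkey is replaced with the bogus
 * 0x00dead so the final IO fails, exercising the fw bug noted above.
 */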
1957 */ 1958 static void krping_fr_test6(struct krping_cb *cb) 1959 { 1960 struct ib_fast_reg_page_list **pl; 1961 struct ib_send_wr *fr, *write, *inv, *bad; 1962 struct ib_wc wc; 1963 struct ib_sge *sgl; 1964 u8 key = 0; 1965 struct ib_mr **mr; 1966 u8 **buf; 1967 dma_addr_t *dma_addr; 1968 int i; 1969 int ret; 1970 int plen = (((cb->size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT; 1971 unsigned long start; 1972 int count = 0; 1973 int scnt; 1974 int depth = cb->txdepth / 3; 1975 1976 if (!depth) { 1977 PRINTF(cb, "txdepth must be > 3 for this test!\n"); 1978 return; 1979 } 1980 1981 pl = kzalloc(sizeof *pl * depth, GFP_KERNEL); 1982 DEBUG_LOG(cb, "%s pl %p size %zu\n", __func__, pl, sizeof *pl * depth); 1983 1984 mr = kzalloc(sizeof *mr * depth, GFP_KERNEL); 1985 DEBUG_LOG(cb, "%s mr %p size %zu\n", __func__, mr, sizeof *mr * depth); 1986 1987 fr = kzalloc(sizeof *fr * depth, GFP_KERNEL); 1988 DEBUG_LOG(cb, "%s fr %p size %zu\n", __func__, fr, sizeof *fr * depth); 1989 1990 sgl = kzalloc(sizeof *sgl * depth, GFP_KERNEL); 1991 DEBUG_LOG(cb, "%s sgl %p size %zu\n", __func__, sgl, sizeof *sgl * depth); 1992 1993 write = kzalloc(sizeof *write * depth, GFP_KERNEL); 1994 DEBUG_LOG(cb, "%s read %p size %zu\n", __func__, write, sizeof *write * depth); 1995 1996 inv = kzalloc(sizeof *inv * depth, GFP_KERNEL); 1997 DEBUG_LOG(cb, "%s inv %p size %zu\n", __func__, inv, sizeof *inv * depth); 1998 1999 buf = kzalloc(sizeof *buf * depth, GFP_KERNEL); 2000 DEBUG_LOG(cb, "%s buf %p size %zu\n", __func__, buf, sizeof *buf * depth); 2001 2002 dma_addr = kzalloc(sizeof *dma_addr * depth, GFP_KERNEL); 2003 DEBUG_LOG(cb, "%s dma_addr %p size %zu\n", __func__, dma_addr, sizeof *dma_addr * depth); 2004 2005 if (!pl || !mr || !fr || !write || !sgl || !buf || !dma_addr) { 2006 PRINTF(cb, "kzalloc failed\n"); 2007 goto err1; 2008 } 2009 2010 for (scnt = 0; scnt < depth; scnt++) { 2011 pl[scnt] = ib_alloc_fast_reg_page_list(cb->qp->device, plen); 2012 if (IS_ERR(pl[scnt])) { 2013 PRINTF(cb, "alloc_fr_page_list failed %ld\n", 2014 PTR_ERR(pl[scnt])); 2015 goto err2; 2016 } 2017 DEBUG_LOG(cb, "%s pl[%u] %p\n", __func__, scnt, pl[scnt]); 2018 2019 mr[scnt] = ib_alloc_fast_reg_mr(cb->pd, plen); 2020 if (IS_ERR(mr[scnt])) { 2021 PRINTF(cb, "alloc_fr failed %ld\n", 2022 PTR_ERR(mr[scnt])); 2023 goto err2; 2024 } 2025 DEBUG_LOG(cb, "%s mr[%u] %p\n", __func__, scnt, mr[scnt]); 2026 ib_update_fast_reg_key(mr[scnt], ++key); 2027 2028 buf[scnt] = kmalloc(cb->size, GFP_KERNEL); 2029 if (!buf[scnt]) { 2030 PRINTF(cb, "kmalloc failed\n"); 2031 ret = -ENOMEM; 2032 goto err2; 2033 } 2034 DEBUG_LOG(cb, "%s buf[%u] %p\n", __func__, scnt, buf[scnt]); 2035 dma_addr[scnt] = dma_map_single(cb->pd->device->dma_device, 2036 buf[scnt], cb->size, 2037 DMA_BIDIRECTIONAL); 2038 if (dma_mapping_error(cb->pd->device->dma_device, 2039 dma_addr[scnt])) { 2040 PRINTF(cb, "dma_map failed\n"); 2041 ret = -ENOMEM; 2042 goto err2; 2043 } 2044 DEBUG_LOG(cb, "%s dma_addr[%u] %p\n", __func__, scnt, (void *)dma_addr[scnt]); 2045 for (i=0; i<plen; i++) { 2046 pl[scnt]->page_list[i] = ((unsigned long)dma_addr[scnt] & PAGE_MASK) + (i * PAGE_SIZE); 2047 DEBUG_LOG(cb, "%s pl[%u]->page_list[%u] 0x%jx\n", 2048 __func__, scnt, i, (uintmax_t)pl[scnt]->page_list[i]); 2049 } 2050 2051 write[scnt].opcode = IB_WR_RDMA_WRITE; 2052 write[scnt].wr_id = scnt; 2053 write[scnt].wr.rdma.rkey = cb->remote_rkey; 2054 write[scnt].wr.rdma.remote_addr = cb->remote_addr; 2055 write[scnt].num_sge = 1; 2056 write[scnt].sg_list = &cb->rdma_sgl; 2057 write[scnt].sg_list->length = 
		write[scnt].opcode = IB_WR_RDMA_WRITE;
		write[scnt].wr_id = scnt;
		write[scnt].wr.rdma.rkey = cb->remote_rkey;
		write[scnt].wr.rdma.remote_addr = cb->remote_addr;
		write[scnt].num_sge = 1;
		write[scnt].sg_list = &cb->rdma_sgl;
		write[scnt].sg_list->length = cb->size;
		write[scnt].next = &fr[scnt];

		fr[scnt].opcode = IB_WR_FAST_REG_MR;
		fr[scnt].wr_id = scnt;
		fr[scnt].wr.fast_reg.page_shift = PAGE_SHIFT;
		fr[scnt].wr.fast_reg.length = cb->size;
		fr[scnt].wr.fast_reg.page_list = pl[scnt];
		fr[scnt].wr.fast_reg.page_list_len = plen;
		fr[scnt].wr.fast_reg.iova_start = (u64)buf[scnt];
		fr[scnt].wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
		fr[scnt].wr.fast_reg.rkey = mr[scnt]->rkey;
		fr[scnt].next = &inv[scnt];

		inv[scnt].opcode = IB_WR_LOCAL_INV;
		inv[scnt].send_flags = IB_SEND_SIGNALED;
		inv[scnt].ex.invalidate_rkey = mr[scnt]->rkey;

		ret = ib_post_send(cb->qp, &write[scnt], &bad);
		if (ret) {
			PRINTF(cb, "ib_post_send failed %d\n", ret);
			goto err2;
		}
	}

	start = time_uptime;
	DEBUG_LOG(cb, "%s starting IO.\n", __func__);
	while (!cb->count || cb->server || count < cb->count) {
		if ((time_uptime - start) >= 9) {
			DEBUG_LOG(cb, "%s pausing 1 tick! count %u\n", __func__,
			    count);
			wait_event_interruptible_timeout(cb->sem,
			    cb->state == ERROR, 1);
			if (cb->state == ERROR)
				break;
			start = time_uptime;
		}
		do {
			ret = ib_poll_cq(cb->cq, 1, &wc);
			if (ret < 0) {
				PRINTF(cb, "ib_poll_cq failed %d\n", ret);
				goto err2;
			}
			if (ret == 1) {
				if (wc.status) {
					PRINTF(cb,
					    "completion error %u wr_id %ju opcode %d\n",
					    wc.status, (uintmax_t)wc.wr_id,
					    wc.opcode);
					goto err2;
				}
				count++;
				if (count == (cb->count - 1))
					cb->rdma_sgl.lkey = 0x00dead;
				if (count == cb->count)
					break;
				ib_update_fast_reg_key(mr[wc.wr_id], ++key);
				fr[wc.wr_id].wr.fast_reg.rkey =
				    mr[wc.wr_id]->rkey;
				inv[wc.wr_id].ex.invalidate_rkey =
				    mr[wc.wr_id]->rkey;
				ret = ib_post_send(cb->qp, &write[wc.wr_id], &bad);
				if (ret) {
					PRINTF(cb,
					    "ib_post_send failed %d\n", ret);
					goto err2;
				}
			} else if (krping_sigpending()) {
				PRINTF(cb, "signal!\n");
				goto err2;
			}
		} while (ret == 1);
	}
	DEBUG_LOG(cb, "%s done!\n", __func__);
err2:
	DEBUG_LOG(cb, "sleeping 1 second\n");
	wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
	DEBUG_LOG(cb, "draining the cq...\n");
	do {
		ret = ib_poll_cq(cb->cq, 1, &wc);
		if (ret < 0) {
			PRINTF(cb, "ib_poll_cq failed %d\n", ret);
			break;
		}
		if (ret == 1) {
			if (wc.status) {
				PRINTF(cb, "completion error %u opcode %u\n",
				    wc.status, wc.opcode);
			}
		}
	} while (ret == 1);

	DEBUG_LOG(cb, "destroying fr mrs!\n");
	for (scnt = 0; scnt < depth; scnt++) {
		if (mr[scnt]) {
			ib_dereg_mr(mr[scnt]);
			DEBUG_LOG(cb, "%s dereg mr %p\n", __func__, mr[scnt]);
		}
	}
	DEBUG_LOG(cb, "unmapping/freeing bufs!\n");
	for (scnt = 0; scnt < depth; scnt++) {
		if (buf[scnt]) {
			dma_unmap_single(cb->pd->device->dma_device,
			    dma_addr[scnt], cb->size, DMA_BIDIRECTIONAL);
			kfree(buf[scnt]);
			DEBUG_LOG(cb, "%s unmap/free buf %p dma_addr %p\n", __func__, buf[scnt], (void *)dma_addr[scnt]);
		}
	}
	DEBUG_LOG(cb, "destroying fr page lists!\n");
	for (scnt = 0; scnt < depth; scnt++) {
		if (pl[scnt]) {
			DEBUG_LOG(cb, "%s free pl %p\n", __func__, pl[scnt]);
			ib_free_fast_reg_page_list(pl[scnt]);
		}
	}
err1:
	if (pl)
		kfree(pl);
	if (mr)
		kfree(mr);
	if (fr)
		kfree(fr);
	if (write)
		kfree(write);
	if (inv)
		kfree(inv);
	if (sgl)
		kfree(sgl);
	if (buf)
		kfree(buf);
	if (dma_addr)
		kfree(dma_addr);
}
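
/*
 * Server side of fr test 6: swap STAG/TO/Len with the client, then run
 * the same chained-WR loop from this end when duplex is requested.
 */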
static void krping_fr_test6_server(struct krping_cb *cb)
{
	struct ib_send_wr *bad_wr;
	struct ib_wc wc;
	int ret;

	/* Spin waiting for client's Start STAG/TO/Len */
	while (cb->state < RDMA_READ_ADV) {
		krping_cq_event_handler(cb->cq, cb);
	}
	DEBUG_LOG(cb, "%s client STAG %x TO 0x%jx\n", __func__,
	    cb->remote_rkey, (uintmax_t)cb->remote_addr);

	/* Send STAG/TO/Len to client */
	krping_format_send(cb, cb->start_dma_addr);
	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
	if (ret) {
		PRINTF(cb, "post send error %d\n", ret);
		return;
	}

	/* Spin waiting for send completion */
	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
	if (ret < 0) {
		PRINTF(cb, "poll error %d\n", ret);
		return;
	}
	if (wc.status) {
		PRINTF(cb, "send completion error %d\n", wc.status);
		return;
	}

	if (cb->duplex)
		krping_fr_test6(cb);
	DEBUG_LOG(cb, "%s waiting for disconnect...\n", __func__);
	wait_event_interruptible(cb->sem, cb->state == ERROR);
}

static void krping_fr_test6_client(struct krping_cb *cb)
{
	struct ib_send_wr *bad;
	struct ib_wc wc;
	int ret;

	cb->state = RDMA_READ_ADV;

	/* Send STAG/TO/Len to server */
	krping_format_send(cb, cb->start_dma_addr);
	if (cb->state == ERROR) {
		PRINTF(cb, "krping_format_send failed\n");
		return;
	}
	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad);
	if (ret) {
		PRINTF(cb, "post send error %d\n", ret);
		return;
	}

	/* Spin waiting for send completion */
	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
	if (ret < 0) {
		PRINTF(cb, "poll error %d\n", ret);
		return;
	}
	if (wc.status) {
		PRINTF(cb, "send completion error %d\n", wc.status);
		return;
	}

	/* Spin waiting for server's Start STAG/TO/Len */
	while (cb->state < RDMA_WRITE_ADV) {
		krping_cq_event_handler(cb->cq, cb);
	}
	DEBUG_LOG(cb, "%s server STAG %x TO 0x%jx\n", __func__, cb->remote_rkey,
	    (uintmax_t)cb->remote_addr);

	return krping_fr_test6(cb);
}
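
/*
 * Server-side main flow: bind and listen, set up the QP and buffers,
 * pre-post a receive for the client's first send, accept the connection,
 * and dispatch to the selected test.
 */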
static void krping_run_server(struct krping_cb *cb)
{
	struct ib_recv_wr *bad_wr;
	int ret;

	ret = krping_bind_server(cb);
	if (ret)
		return;

	ret = krping_setup_qp(cb, cb->child_cm_id);
	if (ret) {
		PRINTF(cb, "setup_qp failed: %d\n", ret);
		goto err0;
	}

	ret = krping_setup_buffers(cb);
	if (ret) {
		PRINTF(cb, "krping_setup_buffers failed: %d\n", ret);
		goto err1;
	}

	ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
	if (ret) {
		PRINTF(cb, "ib_post_recv failed: %d\n", ret);
		goto err2;
	}

	ret = krping_accept(cb);
	if (ret) {
		PRINTF(cb, "connect error %d\n", ret);
		goto err2;
	}

	if (cb->wlat)
		krping_wlat_test_server(cb);
	else if (cb->rlat)
		krping_rlat_test_server(cb);
	else if (cb->bw)
		krping_bw_test_server(cb);
	else if (cb->frtest) {
		switch (cb->testnum) {
		case 1:
		case 2:
		case 3:
		case 4:
			krping_fr_test_server(cb);
			break;
		case 5:
			krping_fr_test5_server(cb);
			break;
		case 6:
			krping_fr_test6_server(cb);
			break;
		default:
			PRINTF(cb, "unknown fr test %d\n", cb->testnum);
			goto err2;
			break;
		}
	} else
		krping_test_server(cb);
	rdma_disconnect(cb->child_cm_id);
err2:
	krping_free_buffers(cb);
err1:
	krping_free_qp(cb);
err0:
	rdma_destroy_id(cb->child_cm_id);
}

static void krping_test_client(struct krping_cb *cb)
{
	int ping, start, cc, i, ret;
	struct ib_send_wr *bad_wr;
	unsigned char c;

	start = 65;
	for (ping = 0; !cb->count || ping < cb->count; ping++) {
		cb->state = RDMA_READ_ADV;

		/* Put some ASCII text in the buffer. */
		cc = sprintf(cb->start_buf, "rdma-ping-%d: ", ping);
		for (i = cc, c = start; i < cb->size; i++) {
			cb->start_buf[i] = c;
			c++;
			if (c > 122)
				c = 65;
		}
		start++;
		if (start > 122)
			start = 65;
		cb->start_buf[cb->size - 1] = 0;

		krping_format_send(cb, cb->start_dma_addr);
		if (cb->state == ERROR) {
			PRINTF(cb, "krping_format_send failed\n");
			break;
		}
		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
		if (ret) {
			PRINTF(cb, "post send error %d\n", ret);
			break;
		}

		/* Wait for server to ACK */
		wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
		if (cb->state != RDMA_WRITE_ADV) {
			PRINTF(cb,
			    "wait for RDMA_WRITE_ADV state %d\n",
			    cb->state);
			break;
		}

		krping_format_send(cb, cb->rdma_dma_addr);
		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
		if (ret) {
			PRINTF(cb, "post send error %d\n", ret);
			break;
		}

		/* Wait for the server to say the RDMA Write is complete. */
		wait_event_interruptible(cb->sem,
		    cb->state >= RDMA_WRITE_COMPLETE);
		if (cb->state != RDMA_WRITE_COMPLETE) {
			PRINTF(cb,
			    "wait for RDMA_WRITE_COMPLETE state %d\n",
			    cb->state);
			break;
		}

		if (cb->validate)
			if (memcmp(cb->start_buf, cb->rdma_buf, cb->size)) {
				PRINTF(cb, "data mismatch!\n");
				break;
			}

		if (cb->verbose) {
			if (strlen(cb->rdma_buf) > 128) {
				char msgbuf[128];

				strlcpy(msgbuf, cb->rdma_buf, sizeof(msgbuf));
				PRINTF(cb, "ping data stripped: %s\n",
				    msgbuf);
			} else
				PRINTF(cb, "ping data: %s\n", cb->rdma_buf);
		}
#ifdef SLOW_KRPING
		wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
#endif
	}
}

static void krping_rlat_test_client(struct krping_cb *cb)
{
	struct ib_send_wr *bad_wr;
	struct ib_wc wc;
	int ret;

	cb->state = RDMA_READ_ADV;

	/* Send STAG/TO/Len to server */
	krping_format_send(cb, cb->start_dma_addr);
	if (cb->state == ERROR) {
		PRINTF(cb, "krping_format_send failed\n");
		return;
	}
	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
	if (ret) {
		PRINTF(cb, "post send error %d\n", ret);
		return;
	}

	/* Spin waiting for send completion */
	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
	if (ret < 0) {
		PRINTF(cb, "poll error %d\n", ret);
		return;
	}
	if (wc.status) {
		PRINTF(cb, "send completion error %d\n", wc.status);
		return;
	}

	/* Spin waiting for server's Start STAG/TO/Len */
	while (cb->state < RDMA_WRITE_ADV) {
		krping_cq_event_handler(cb->cq, cb);
	}
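
	/*
	 * Disabled measurement block: when enabled (#if 1), it times
	 * 100000 zero-byte RDMA writes with microtime() and reports the
	 * total elapsed microseconds as a 0B-write-latency estimate.
	 */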
#if 0
	{
		int i;
		struct timeval start, stop;
		time_t sec;
		suseconds_t usec;
		unsigned long long elapsed;
		struct ib_wc wc;
		struct ib_send_wr *bad_wr;
		int ne;

		cb->rdma_sq_wr.opcode = IB_WR_RDMA_WRITE;
		cb->rdma_sq_wr.wr.rdma.rkey = cb->remote_rkey;
		cb->rdma_sq_wr.wr.rdma.remote_addr = cb->remote_addr;
		cb->rdma_sq_wr.sg_list->length = 0;
		cb->rdma_sq_wr.num_sge = 0;

		microtime(&start);
		for (i = 0; i < 100000; i++) {
			if (ib_post_send(cb->qp, &cb->rdma_sq_wr, &bad_wr)) {
				PRINTF(cb, "Couldn't post send\n");
				return;
			}
			do {
				ne = ib_poll_cq(cb->cq, 1, &wc);
			} while (ne == 0);
			if (ne < 0) {
				PRINTF(cb, "poll CQ failed %d\n", ne);
				return;
			}
			if (wc.status != IB_WC_SUCCESS) {
				PRINTF(cb, "Completion with error at %s:\n",
				    cb->server ? "server" : "client");
				PRINTF(cb, "Failed status %d: wr_id %d\n",
				    wc.status, (int) wc.wr_id);
				return;
			}
		}
		microtime(&stop);

		if (stop.tv_usec < start.tv_usec) {
			stop.tv_usec += 1000000;
			stop.tv_sec -= 1;
		}
		sec = stop.tv_sec - start.tv_sec;
		usec = stop.tv_usec - start.tv_usec;
		elapsed = sec * 1000000 + usec;
		PRINTF(cb, "0B-write-lat iters 100000 usec %llu\n", elapsed);
	}
#endif

	rlat_test(cb);
}

static void krping_wlat_test_client(struct krping_cb *cb)
{
	struct ib_send_wr *bad_wr;
	struct ib_wc wc;
	int ret;

	cb->state = RDMA_READ_ADV;

	/* Send STAG/TO/Len to server */
	krping_format_send(cb, cb->start_dma_addr);
	if (cb->state == ERROR) {
		PRINTF(cb, "krping_format_send failed\n");
		return;
	}
	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
	if (ret) {
		PRINTF(cb, "post send error %d\n", ret);
		return;
	}

	/* Spin waiting for send completion */
	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
	if (ret < 0) {
		PRINTF(cb, "poll error %d\n", ret);
		return;
	}
	if (wc.status) {
		PRINTF(cb, "send completion error %d\n", wc.status);
		return;
	}

	/* Spin waiting for server's Start STAG/TO/Len */
	while (cb->state < RDMA_WRITE_ADV) {
		krping_cq_event_handler(cb->cq, cb);
	}

	wlat_test(cb);
}

static void krping_bw_test_client(struct krping_cb *cb)
{
	struct ib_send_wr *bad_wr;
	struct ib_wc wc;
	int ret;

	cb->state = RDMA_READ_ADV;

	/* Send STAG/TO/Len to server */
	krping_format_send(cb, cb->start_dma_addr);
	if (cb->state == ERROR) {
		PRINTF(cb, "krping_format_send failed\n");
		return;
	}
	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
	if (ret) {
		PRINTF(cb, "post send error %d\n", ret);
		return;
	}

	/* Spin waiting for send completion */
	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
	if (ret < 0) {
		PRINTF(cb, "poll error %d\n", ret);
		return;
	}
	if (wc.status) {
		PRINTF(cb, "send completion error %d\n", wc.status);
		return;
	}

	/* Spin waiting for server's Start STAG/TO/Len */
	while (cb->state < RDMA_WRITE_ADV) {
		krping_cq_event_handler(cb->cq, cb);
	}

	bw_test(cb);
}

/*
 * fastreg 2 valid different mrs and verify the completions.
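 * Both fastreg WRs are posted signaled with distinct rkeys, so two
 * successful completions are expected on the CQ.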
 */
static void krping_fr_test1(struct krping_cb *cb)
{
	struct ib_fast_reg_page_list *pl;
	struct ib_send_wr fr, *bad;
	struct ib_wc wc;
	struct ib_mr *mr1, *mr2;
	int i;
	int ret;
	int size = cb->size;
	int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
	int count = 0;

	pl = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
	if (IS_ERR(pl)) {
		PRINTF(cb, "ib_alloc_fast_reg_page_list failed %ld\n", PTR_ERR(pl));
		return;
	}

	mr1 = ib_alloc_fast_reg_mr(cb->pd, plen);
	if (IS_ERR(mr1)) {
		PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(mr1));
		goto err1;
	}
	mr2 = ib_alloc_fast_reg_mr(cb->pd, plen);
	if (IS_ERR(mr2)) {
		PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(mr2));
		goto err2;
	}

	for (i = 0; i < plen; i++)
		pl->page_list[i] = i * PAGE_SIZE;

	memset(&fr, 0, sizeof fr);
	fr.opcode = IB_WR_FAST_REG_MR;
	fr.wr_id = 1;
	fr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr.wr.fast_reg.length = size;
	fr.wr.fast_reg.page_list = pl;
	fr.wr.fast_reg.page_list_len = plen;
	fr.wr.fast_reg.iova_start = 0;
	fr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
	fr.send_flags = IB_SEND_SIGNALED;
	fr.wr.fast_reg.rkey = mr1->rkey;
	DEBUG_LOG(cb, "%s fr1: stag 0x%x plen %u size %u depth %u\n", __func__, fr.wr.fast_reg.rkey, plen, cb->size, cb->txdepth);
	ret = ib_post_send(cb->qp, &fr, &bad);
	if (ret) {
		PRINTF(cb, "ib_post_send failed %d\n", ret);
		goto err3;
	}
	fr.wr.fast_reg.rkey = mr2->rkey;
	DEBUG_LOG(cb, "%s fr2: stag 0x%x plen %u size %u depth %u\n", __func__, fr.wr.fast_reg.rkey, plen, cb->size, cb->txdepth);
	ret = ib_post_send(cb->qp, &fr, &bad);
	if (ret) {
		PRINTF(cb, "ib_post_send failed %d\n", ret);
		goto err3;
	}

	DEBUG_LOG(cb, "sleeping 1 second\n");
	wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
	do {
		ret = ib_poll_cq(cb->cq, 1, &wc);
		if (ret < 0) {
			PRINTF(cb, "ib_poll_cq failed %d\n", ret);
			goto err3;
		}
		if (ret == 1) {
			DEBUG_LOG(cb, "completion status %u wr %s\n",
			    wc.status, wc.wr_id == 1 ? "fr" : "inv");
			count++;
		} else if (krping_sigpending()) {
			PRINTF(cb, "signal!\n");
			goto err3;
		}

		wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
	} while (count != 2);
err3:
	DEBUG_LOG(cb, "sleeping 1 second\n");
	wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
	DEBUG_LOG(cb, "draining the cq...\n");
	do {
		ret = ib_poll_cq(cb->cq, 1, &wc);
		if (ret < 0) {
			PRINTF(cb, "ib_poll_cq failed %d\n", ret);
			break;
		}
		if (ret == 1) {
			PRINTF(cb, "completion %u opcode %u\n", wc.status, wc.opcode);
		}
	} while (ret == 1);
	DEBUG_LOG(cb, "destroying fr mr2!\n");
	ib_dereg_mr(mr2);
err2:
	DEBUG_LOG(cb, "destroying fr mr1!\n");
	ib_dereg_mr(mr1);
err1:
	DEBUG_LOG(cb, "destroying fr page list!\n");
	ib_free_fast_reg_page_list(pl);
	DEBUG_LOG(cb, "%s done!\n", __func__);
}

/*
 * fastreg the same mr twice, 2nd one should produce error cqe.
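 * The second fastreg reuses an rkey that is still in the valid state,
 * which the adapter should fail, so the second completion is expected
 * to carry a non-success status.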
 */
static void krping_fr_test2(struct krping_cb *cb)
{
	struct ib_fast_reg_page_list *pl;
	struct ib_send_wr fr, *bad;
	struct ib_wc wc;
	struct ib_mr *mr1;
	int i;
	int ret;
	int size = cb->size;
	int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
	int count = 0;

	pl = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
	if (IS_ERR(pl)) {
		PRINTF(cb, "ib_alloc_fast_reg_page_list failed %ld\n", PTR_ERR(pl));
		return;
	}

	mr1 = ib_alloc_fast_reg_mr(cb->pd, plen);
	if (IS_ERR(mr1)) {
		PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(mr1));
		goto err1;
	}

	for (i = 0; i < plen; i++)
		pl->page_list[i] = i * PAGE_SIZE;

	memset(&fr, 0, sizeof fr);
	fr.opcode = IB_WR_FAST_REG_MR;
	fr.wr_id = 1;
	fr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr.wr.fast_reg.length = size;
	fr.wr.fast_reg.page_list = pl;
	fr.wr.fast_reg.page_list_len = plen;
	fr.wr.fast_reg.iova_start = 0;
	fr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
	fr.send_flags = IB_SEND_SIGNALED;
	fr.wr.fast_reg.rkey = mr1->rkey;
	DEBUG_LOG(cb, "%s fr1: stag 0x%x plen %u size %u depth %u\n", __func__, fr.wr.fast_reg.rkey, plen, cb->size, cb->txdepth);
	ret = ib_post_send(cb->qp, &fr, &bad);
	if (ret) {
		PRINTF(cb, "ib_post_send failed %d\n", ret);
		goto err3;
	}
	DEBUG_LOG(cb, "%s fr2: stag 0x%x plen %u size %u depth %u\n", __func__, fr.wr.fast_reg.rkey, plen, cb->size, cb->txdepth);
	ret = ib_post_send(cb->qp, &fr, &bad);
	if (ret) {
		PRINTF(cb, "ib_post_send failed %d\n", ret);
		goto err3;
	}

	DEBUG_LOG(cb, "sleeping 1 second\n");
	wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
	do {
		ret = ib_poll_cq(cb->cq, 1, &wc);
		if (ret < 0) {
			PRINTF(cb, "ib_poll_cq failed %d\n", ret);
			goto err3;
		}
		if (ret == 1) {
			DEBUG_LOG(cb, "completion status %u wr %s\n",
			    wc.status, wc.wr_id == 1 ? "fr" : "inv");
			count++;
		} else if (krping_sigpending()) {
			PRINTF(cb, "signal!\n");
			goto err3;
		}
		wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
	} while (count != 2);
err3:
	DEBUG_LOG(cb, "sleeping 1 second\n");
	wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
	DEBUG_LOG(cb, "draining the cq...\n");
	do {
		ret = ib_poll_cq(cb->cq, 1, &wc);
		if (ret < 0) {
			PRINTF(cb, "ib_poll_cq failed %d\n", ret);
			break;
		}
		if (ret == 1) {
			PRINTF(cb, "completion %u opcode %u\n", wc.status, wc.opcode);
		}
	} while (ret == 1);
	DEBUG_LOG(cb, "destroying fr mr1!\n");
	ib_dereg_mr(mr1);
err1:
	DEBUG_LOG(cb, "destroying fr page list!\n");
	ib_free_fast_reg_page_list(pl);
	DEBUG_LOG(cb, "%s done!\n", __func__);
}

/*
 * fastreg pipelined in a loop as fast as we can until the user interrupts.
 * NOTE: every 9 seconds we sleep for 1 second to keep the kernel happy.
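 * Up to txdepth/2 signaled fastreg+invalidate pairs are kept in flight,
 * and the region length is re-randomized before each repost.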
 */
static void krping_fr_test3(struct krping_cb *cb)
{
	struct ib_fast_reg_page_list *pl;
	struct ib_send_wr fr, inv, *bad;
	struct ib_wc wc;
	u8 key = 0;
	struct ib_mr *mr;
	int i;
	int ret;
	int size = cb->size;
	int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long start;
	int count = 0;
	int scnt = 0;

	pl = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
	if (IS_ERR(pl)) {
		PRINTF(cb, "ib_alloc_fast_reg_page_list failed %ld\n", PTR_ERR(pl));
		return;
	}

	mr = ib_alloc_fast_reg_mr(cb->pd, plen);
	if (IS_ERR(mr)) {
		PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(mr));
		goto err1;
	}

	for (i = 0; i < plen; i++)
		pl->page_list[i] = i * PAGE_SIZE;

	memset(&fr, 0, sizeof fr);
	fr.opcode = IB_WR_FAST_REG_MR;
	fr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr.wr.fast_reg.length = size;
	fr.wr.fast_reg.page_list = pl;
	fr.wr.fast_reg.page_list_len = plen;
	fr.wr.fast_reg.iova_start = 0;
	fr.send_flags = IB_SEND_SIGNALED;
	fr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
	fr.next = &inv;
	memset(&inv, 0, sizeof inv);
	inv.opcode = IB_WR_LOCAL_INV;
	inv.send_flags = IB_SEND_SIGNALED;

	DEBUG_LOG(cb, "fr_test: stag index 0x%x plen %u size %u depth %u\n", mr->rkey >> 8, plen, cb->size, cb->txdepth);
	start = time_uptime;
	while (1) {
		if ((time_uptime - start) >= 9) {
			DEBUG_LOG(cb, "fr_test: pausing 1 second! count %u latest size %u plen %u\n", count, size, plen);
			wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
			if (cb->state == ERROR)
				break;
			start = time_uptime;
		}
		while (scnt < (cb->txdepth >> 1)) {
			ib_update_fast_reg_key(mr, ++key);
			fr.wr.fast_reg.rkey = mr->rkey;
			inv.ex.invalidate_rkey = mr->rkey;
			size = arc4random() % cb->size;
			if (size == 0)
				size = cb->size;
			plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
			fr.wr.fast_reg.length = size;
			fr.wr.fast_reg.page_list_len = plen;
			ret = ib_post_send(cb->qp, &fr, &bad);
			if (ret) {
				PRINTF(cb, "ib_post_send failed %d\n", ret);
				goto err2;
			}
			scnt += 2;
		}

		do {
			ret = ib_poll_cq(cb->cq, 1, &wc);
			if (ret < 0) {
				PRINTF(cb, "ib_poll_cq failed %d\n", ret);
				goto err2;
			}
			if (ret == 1) {
				if (wc.status) {
					PRINTF(cb, "completion error %u\n", wc.status);
					goto err2;
				}
				count++;
				scnt--;
			} else if (krping_sigpending()) {
				PRINTF(cb, "signal!\n");
				goto err2;
			}
		} while (ret == 1);
	}
err2:
	DEBUG_LOG(cb, "sleeping 1 second\n");
	wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
	DEBUG_LOG(cb, "draining the cq...\n");
	do {
		ret = ib_poll_cq(cb->cq, 1, &wc);
		if (ret < 0) {
			PRINTF(cb, "ib_poll_cq failed %d\n", ret);
			break;
		}
		if (ret == 1) {
			if (wc.status) {
				PRINTF(cb, "completion error %u opcode %u\n", wc.status, wc.opcode);
			}
		}
	} while (ret == 1);
	DEBUG_LOG(cb, "fr_test: done!\n");
	ib_dereg_mr(mr);
err1:
	DEBUG_LOG(cb, "destroying fr page list!\n");
	ib_free_fast_reg_page_list(pl);
	DEBUG_LOG(cb, "%s done!\n", __func__);
}

/*
 * fastreg 1 and invalidate 1 mr and verify completion.
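 * The invalidate is chained behind the fastreg but left unsignaled, so
 * exactly one completion (the fastreg's) is expected.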
 */
static void krping_fr_test4(struct krping_cb *cb)
{
	struct ib_fast_reg_page_list *pl;
	struct ib_send_wr fr, inv, *bad;
	struct ib_wc wc;
	struct ib_mr *mr1;
	int i;
	int ret;
	int size = cb->size;
	int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
	int count = 0;

	pl = ib_alloc_fast_reg_page_list(cb->qp->device, plen);
	if (IS_ERR(pl)) {
		PRINTF(cb, "ib_alloc_fast_reg_page_list failed %ld\n", PTR_ERR(pl));
		return;
	}

	mr1 = ib_alloc_fast_reg_mr(cb->pd, plen);
	if (IS_ERR(mr1)) {
		PRINTF(cb, "ib_alloc_fast_reg_mr failed %ld\n", PTR_ERR(mr1));
		goto err1;
	}

	for (i = 0; i < plen; i++)
		pl->page_list[i] = i * PAGE_SIZE;

	memset(&fr, 0, sizeof fr);
	fr.opcode = IB_WR_FAST_REG_MR;
	fr.wr_id = 1;
	fr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr.wr.fast_reg.length = size;
	fr.wr.fast_reg.page_list = pl;
	fr.wr.fast_reg.page_list_len = plen;
	fr.wr.fast_reg.iova_start = 0;
	fr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
	fr.send_flags = IB_SEND_SIGNALED;
	fr.wr.fast_reg.rkey = mr1->rkey;
	fr.next = &inv;
	memset(&inv, 0, sizeof inv);
	inv.opcode = IB_WR_LOCAL_INV;
	inv.ex.invalidate_rkey = mr1->rkey;

	DEBUG_LOG(cb, "%s fr1: stag 0x%x plen %u size %u depth %u\n", __func__, fr.wr.fast_reg.rkey, plen, cb->size, cb->txdepth);
	ret = ib_post_send(cb->qp, &fr, &bad);
	if (ret) {
		PRINTF(cb, "ib_post_send failed %d\n", ret);
		goto err3;
	}
	DEBUG_LOG(cb, "sleeping 1 second\n");
	wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
	do {
		ret = ib_poll_cq(cb->cq, 1, &wc);
		if (ret < 0) {
			PRINTF(cb, "ib_poll_cq failed %d\n", ret);
			goto err3;
		}
		if (ret == 1) {
			DEBUG_LOG(cb, "completion status %u wr %s\n",
			    wc.status, wc.wr_id == 1 ? "fr" : "inv");
"fr" : "inv"); 2977 count++; 2978 } else if (krping_sigpending()) { 2979 PRINTF(cb, "signal!\n"); 2980 goto err3; 2981 } 2982 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ); 2983 } while (count != 1); 2984 err3: 2985 DEBUG_LOG(cb, "sleeping 1 second\n"); 2986 wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ); 2987 DEBUG_LOG(cb, "draining the cq...\n"); 2988 do { 2989 ret = ib_poll_cq(cb->cq, 1, &wc); 2990 if (ret < 0) { 2991 PRINTF(cb, "ib_poll_cq failed %d\n", ret); 2992 break; 2993 } 2994 if (ret == 1) { 2995 PRINTF(cb, "completion %u opcode %u\n", wc.status, wc.opcode); 2996 } 2997 } while (ret == 1); 2998 DEBUG_LOG(cb, "destroying fr mr1!\n"); 2999 ib_dereg_mr(mr1); 3000 err1: 3001 DEBUG_LOG(cb, "destroying fr page list!\n"); 3002 ib_free_fast_reg_page_list(pl); 3003 DEBUG_LOG(cb, "%s done!\n", __func__); 3004 } 3005 3006 static void krping_fr_test(struct krping_cb *cb) 3007 { 3008 switch (cb->testnum) { 3009 case 1: 3010 krping_fr_test1(cb); 3011 break; 3012 case 2: 3013 krping_fr_test2(cb); 3014 break; 3015 case 3: 3016 krping_fr_test3(cb); 3017 break; 3018 case 4: 3019 krping_fr_test4(cb); 3020 break; 3021 case 5: 3022 krping_fr_test5_client(cb); 3023 break; 3024 case 6: 3025 krping_fr_test6_client(cb); 3026 break; 3027 default: 3028 PRINTF(cb, "Unkown frtest num %u\n", cb->testnum); 3029 break; 3030 } 3031 } 3032 3033 static int krping_connect_client(struct krping_cb *cb) 3034 { 3035 struct rdma_conn_param conn_param; 3036 int ret; 3037 3038 memset(&conn_param, 0, sizeof conn_param); 3039 conn_param.responder_resources = 1; 3040 conn_param.initiator_depth = 1; 3041 conn_param.retry_count = 10; 3042 3043 ret = rdma_connect(cb->cm_id, &conn_param); 3044 if (ret) { 3045 PRINTF(cb, "rdma_connect error %d\n", ret); 3046 return ret; 3047 } 3048 3049 wait_event_interruptible(cb->sem, cb->state >= CONNECTED); 3050 if (cb->state == ERROR) { 3051 PRINTF(cb, "wait for CONNECTED state %d\n", cb->state); 3052 return -1; 3053 } 3054 3055 DEBUG_LOG(cb, "rdma_connect successful\n"); 3056 return 0; 3057 } 3058 3059 static int krping_bind_client(struct krping_cb *cb) 3060 { 3061 struct sockaddr_in sin; 3062 int ret; 3063 3064 memset(&sin, 0, sizeof(sin)); 3065 sin.sin_len = sizeof sin; 3066 sin.sin_family = AF_INET; 3067 sin.sin_addr.s_addr = cb->addr.s_addr; 3068 sin.sin_port = cb->port; 3069 3070 ret = rdma_resolve_addr(cb->cm_id, NULL, (struct sockaddr *) &sin, 3071 2000); 3072 if (ret) { 3073 PRINTF(cb, "rdma_resolve_addr error %d\n", ret); 3074 return ret; 3075 } 3076 3077 wait_event_interruptible(cb->sem, cb->state >= ROUTE_RESOLVED); 3078 if (cb->state != ROUTE_RESOLVED) { 3079 PRINTF(cb, 3080 "addr/route resolution did not resolve: state %d\n", 3081 cb->state); 3082 return -EINTR; 3083 } 3084 3085 if (cb->mem == FASTREG && !fastreg_supported(cb, 0)) 3086 return -EINVAL; 3087 3088 DEBUG_LOG(cb, "rdma_resolve_addr - rdma_resolve_route successful\n"); 3089 return 0; 3090 } 3091 3092 static void krping_run_client(struct krping_cb *cb) 3093 { 3094 struct ib_recv_wr *bad_wr; 3095 int ret; 3096 3097 ret = krping_bind_client(cb); 3098 if (ret) 3099 return; 3100 3101 ret = krping_setup_qp(cb, cb->cm_id); 3102 if (ret) { 3103 PRINTF(cb, "setup_qp failed: %d\n", ret); 3104 return; 3105 } 3106 3107 ret = krping_setup_buffers(cb); 3108 if (ret) { 3109 PRINTF(cb, "krping_setup_buffers failed: %d\n", ret); 3110 goto err1; 3111 } 3112 3113 ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr); 3114 if (ret) { 3115 PRINTF(cb, "ib_post_recv failed: %d\n", ret); 3116 goto err2; 
int krping_doit(char *cmd, void *cookie)
{
	struct krping_cb *cb;
	int op;
	int ret = 0;
	char *optarg;
	unsigned long optint;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return -ENOMEM;

	mutex_lock(&krping_mutex);
	list_add_tail(&cb->list, &krping_cbs);
	mutex_unlock(&krping_mutex);

	cb->cookie = cookie;
	cb->server = -1;
	cb->state = IDLE;
	cb->size = 64;
	cb->txdepth = RPING_SQ_DEPTH;
	cb->mem = DMA;
	init_waitqueue_head(&cb->sem);

	while ((op = krping_getopt("krping", &cmd, krping_opts, NULL, &optarg,
	    &optint)) != 0) {
		switch (op) {
		case 'a':
			cb->addr_str = optarg;
			DEBUG_LOG(cb, "ipaddr (%s)\n", optarg);
			if (!inet_aton(optarg, &cb->addr)) {
				PRINTF(cb, "bad addr string %s\n",
				    optarg);
				ret = EINVAL;
			}
			break;
		case 'p':
			cb->port = htons(optint);
			DEBUG_LOG(cb, "port %d\n", (int)optint);
			break;
		case 'P':
			cb->poll = 1;
			DEBUG_LOG(cb, "poll\n");
			break;
		case 's':
			cb->server = 1;
			DEBUG_LOG(cb, "server\n");
			break;
		case 'c':
			cb->server = 0;
			DEBUG_LOG(cb, "client\n");
			break;
		case 'S':
			cb->size = optint;
			if ((cb->size < 1) ||
			    (cb->size > RPING_BUFSIZE)) {
				PRINTF(cb, "Invalid size %d "
				    "(valid range is 1 to %d)\n",
				    cb->size, RPING_BUFSIZE);
				ret = EINVAL;
			} else
				DEBUG_LOG(cb, "size %d\n", (int)optint);
			break;
		case 'C':
			cb->count = optint;
			if (cb->count < 0) {
				PRINTF(cb, "Invalid count %d\n",
				    cb->count);
				ret = EINVAL;
			} else
				DEBUG_LOG(cb, "count %d\n", (int) cb->count);
			break;
		case 'v':
			cb->verbose++;
			DEBUG_LOG(cb, "verbose\n");
			break;
		case 'V':
			cb->validate++;
			DEBUG_LOG(cb, "validate data\n");
			break;
		case 'l':
			cb->wlat++;
			break;
		case 'L':
			cb->rlat++;
			break;
		case 'B':
			cb->bw++;
			break;
		case 'd':
			cb->duplex++;
			break;
		case 'm':
			if (!strncmp(optarg, "dma", 3))
				cb->mem = DMA;
			else if (!strncmp(optarg, "fastreg", 7))
				cb->mem = FASTREG;
			else if (!strncmp(optarg, "mw", 2))
				cb->mem = MW;
			else if (!strncmp(optarg, "mr", 2))
				cb->mem = MR;
			else {
				PRINTF(cb, "unknown mem mode %s.  "
				    "Must be dma, fastreg, mw, or mr\n",
				    optarg);
				ret = -EINVAL;
				break;
			}
			break;
		case 'I':
			cb->server_invalidate = 1;
			break;
		case 'T':
			cb->txdepth = optint;
			DEBUG_LOG(cb, "txdepth %d\n", (int) cb->txdepth);
			break;
		case 'Z':
			cb->local_dma_lkey = 1;
			DEBUG_LOG(cb, "using local dma lkey\n");
			break;
		case 'R':
			cb->read_inv = 1;
			DEBUG_LOG(cb, "using read-with-inv\n");
			break;
		case 'f':
			cb->frtest = 1;
			cb->testnum = optint;
			DEBUG_LOG(cb, "fast-reg test!\n");
			break;
		default:
			PRINTF(cb, "unknown opt %s\n", optarg);
			ret = -EINVAL;
			break;
		}
	}
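
	/*
	 * Bail out on parse errors, then sanity-check the option
	 * combination before creating the CM ID.
	 */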
" 3245 "Must be dma, fastreg, mw, or mr\n", 3246 optarg); 3247 ret = -EINVAL; 3248 break; 3249 } 3250 break; 3251 case 'I': 3252 cb->server_invalidate = 1; 3253 break; 3254 case 'T': 3255 cb->txdepth = optint; 3256 DEBUG_LOG(cb, "txdepth %d\n", (int) cb->txdepth); 3257 break; 3258 case 'Z': 3259 cb->local_dma_lkey = 1; 3260 DEBUG_LOG(cb, "using local dma lkey\n"); 3261 break; 3262 case 'R': 3263 cb->read_inv = 1; 3264 DEBUG_LOG(cb, "using read-with-inv\n"); 3265 break; 3266 case 'f': 3267 cb->frtest = 1; 3268 cb->testnum = optint; 3269 DEBUG_LOG(cb, "fast-reg test!\n"); 3270 break; 3271 default: 3272 PRINTF(cb, "unknown opt %s\n", optarg); 3273 ret = -EINVAL; 3274 break; 3275 } 3276 } 3277 if (ret) 3278 goto out; 3279 3280 if (cb->server == -1) { 3281 PRINTF(cb, "must be either client or server\n"); 3282 ret = -EINVAL; 3283 goto out; 3284 } 3285 3286 if ((cb->frtest + cb->bw + cb->rlat + cb->wlat) > 1) { 3287 PRINTF(cb, "Pick only one test: fr, bw, rlat, wlat\n"); 3288 ret = -EINVAL; 3289 goto out; 3290 } 3291 if (cb->server_invalidate && cb->mem != FASTREG) { 3292 PRINTF(cb, "server_invalidate only valid with fastreg mem_mode\n"); 3293 ret = -EINVAL; 3294 goto out; 3295 } 3296 3297 if (cb->read_inv && cb->mem != FASTREG) { 3298 PRINTF(cb, "read_inv only valid with fastreg mem_mode\n"); 3299 ret = -EINVAL; 3300 goto out; 3301 } 3302 3303 if (cb->mem != MR && (cb->wlat || cb->rlat || cb->bw || cb->frtest)) { 3304 PRINTF(cb, "wlat, rlat, and bw tests only support mem_mode MR\n"); 3305 ret = -EINVAL; 3306 goto out; 3307 } 3308 3309 cb->cm_id = rdma_create_id(krping_cma_event_handler, cb, RDMA_PS_TCP, IB_QPT_RC); 3310 if (IS_ERR(cb->cm_id)) { 3311 ret = PTR_ERR(cb->cm_id); 3312 PRINTF(cb, "rdma_create_id error %d\n", ret); 3313 goto out; 3314 } 3315 DEBUG_LOG(cb, "created cm_id %p\n", cb->cm_id); 3316 3317 if (cb->server) 3318 krping_run_server(cb); 3319 else 3320 krping_run_client(cb); 3321 3322 DEBUG_LOG(cb, "destroy cm_id %p\n", cb->cm_id); 3323 rdma_destroy_id(cb->cm_id); 3324 out: 3325 mutex_lock(&krping_mutex); 3326 list_del(&cb->list); 3327 mutex_unlock(&krping_mutex); 3328 kfree(cb); 3329 return ret; 3330 } 3331 3332 void 3333 krping_walk_cb_list(void (*f)(struct krping_stats *, void *), void *arg) 3334 { 3335 struct krping_cb *cb; 3336 3337 mutex_lock(&krping_mutex); 3338 list_for_each_entry(cb, &krping_cbs, list) 3339 (*f)(cb->pd ? &cb->stats : NULL, arg); 3340 mutex_unlock(&krping_mutex); 3341 } 3342 3343 void krping_init(void) 3344 { 3345 3346 mutex_init(&krping_mutex); 3347 } 3348