xref: /freebsd/sys/contrib/rdma/krping/krping.c (revision 82397d791966b09d344251bc709cd9db2b3a1902)
1 /*
2  * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3  * Copyright (c) 2006-2009 Open Grid Computing, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <linux/module.h>
38 #include <linux/moduleparam.h>
39 #include <linux/slab.h>
40 #include <linux/err.h>
41 #include <linux/string.h>
42 #include <linux/list.h>
43 #include <linux/in.h>
44 #include <linux/device.h>
45 #include <linux/pci.h>
46 #include <linux/sched.h>
47 #include <linux/wait.h>
48 
49 #include <asm/atomic.h>
50 
51 #include <rdma/ib_verbs.h>
52 #include <rdma/rdma_cm.h>
53 
54 #include "krping.h"
55 #include "getopt.h"
56 
57 #define PFX "krping: "
58 
59 extern int krping_debug;
60 #define DEBUG_LOG(...) do { if (krping_debug) log(LOG_INFO, __VA_ARGS__); } while (0)
61 #define BIND_INFO 1
62 
63 MODULE_AUTHOR("Steve Wise");
64 MODULE_DESCRIPTION("RDMA ping client/server");
65 MODULE_LICENSE("Dual BSD/GPL");
66 MODULE_VERSION(krping, 1);
67 MODULE_DEPEND(krping, linuxkpi, 1, 1, 1);
68 
69 static __inline uint64_t
70 get_cycles(void)
71 {
72 	uint32_t low, high;
73 	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
74 	return (low | ((u_int64_t)high << 32));
75 }
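/*
 * Usage sketch (illustrative, not part of the driver): the wlat/bw tests
 * below bracket each post/poll step with get_cycles() and report the raw
 * TSC deltas, e.g.
 *
 *	cycles_t t0 = get_cycles();
 *	ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
 *	cycles_t t1 = get_cycles();
 *	sum_post += t1 - t0;
 *
 * Note the rdtsc-based counter is x86-specific and is never converted to
 * wall-clock time anywhere in this file.
 */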
76 
77 typedef uint64_t cycles_t;
78 
79 enum mem_type {
80 	DMA = 1,
81 	REG = 2,
82 };
83 
84 static const struct krping_option krping_opts[] = {
85 	{"count", OPT_INT, 'C'},
86 	{"size", OPT_INT, 'S'},
87 	{"addr", OPT_STRING, 'a'},
88 	{"addr6", OPT_STRING, 'A'},
89 	{"port", OPT_INT, 'p'},
90 	{"verbose", OPT_NOPARAM, 'v'},
91 	{"validate", OPT_NOPARAM, 'V'},
92 	{"server", OPT_NOPARAM, 's'},
93 	{"client", OPT_NOPARAM, 'c'},
94 	{"server_inv", OPT_NOPARAM, 'I'},
95 	{"wlat", OPT_NOPARAM, 'l'},
96 	{"rlat", OPT_NOPARAM, 'L'},
97 	{"bw", OPT_NOPARAM, 'B'},
98 	{"duplex", OPT_NOPARAM, 'd'},
99 	{"tos", OPT_INT, 't'},
100 	{"txdepth", OPT_INT, 'T'},
101 	{"poll", OPT_NOPARAM, 'P'},
102 	{"local_dma_lkey", OPT_NOPARAM, 'Z'},
103 	{"read_inv", OPT_NOPARAM, 'R'},
104 	{"fr", OPT_NOPARAM, 'f'},
105 	{NULL, 0, 0}
106 };
107 
108 #define htonll(x) cpu_to_be64((x))
109 #define ntohll(x) be64_to_cpu((x))
110 
111 static DEFINE_MUTEX(krping_mutex);
112 
113 /*
114  * List of running krping threads.
115  */
116 static LIST_HEAD(krping_cbs);
117 
118 /*
119  * Invoke like this, one on each side, using the server's address on
120  * the RDMA device (iw%d):
121  *
122  * /bin/echo server,port=9999,addr=192.168.69.142,validate > /proc/krping
123  * /bin/echo client,port=9999,addr=192.168.69.142,validate > /proc/krping
124  * /bin/echo client,port=9999,addr6=2001:db8:0:f101::1,validate > /proc/krping
125  *
126  * krping "ping/pong" loop:
127  * 	client sends source rkey/addr/len
128  *	server receives source rkey/addr/len
129  *	server rdma reads "ping" data from source
130  * 	server sends "go ahead" on rdma read completion
131  *	client sends sink rkey/addr/len
132  * 	server receives sink rkey/addr/len
133  * 	server rdma writes "pong" data to sink
134  * 	server sends "go ahead" on rdma write completion
135  * 	<repeat loop>
136  */
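/*
 * Illustrative sketch (assumed flow, not part of the driver): the option
 * string written above is eventually handed to krping_doit() below as a
 * single comma-separated command and tokenized by krping_getopt()
 * against krping_opts[], e.g.
 *
 *	krping_doit("client,addr=192.168.69.142,port=9999,validate");
 */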
137 
138 /*
139  * These states are used to signal events between the completion handler
140  * and the main client or server thread.
141  *
142  * Once CONNECTED, they cycle through RDMA_READ_ADV, RDMA_WRITE_ADV,
143  * and RDMA_WRITE_COMPLETE for each ping.
144  */
145 enum test_state {
146 	IDLE = 1,
147 	CONNECT_REQUEST,
148 	ADDR_RESOLVED,
149 	ROUTE_RESOLVED,
150 	CONNECTED,
151 	RDMA_READ_ADV,
152 	RDMA_READ_COMPLETE,
153 	RDMA_WRITE_ADV,
154 	RDMA_WRITE_COMPLETE,
155 	ERROR
156 };
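/*
 * Illustrative per-ping walk through the states above, as driven by the
 * completion and CM handlers below:
 *
 *	client: RDMA_READ_ADV -> RDMA_WRITE_ADV -> RDMA_WRITE_COMPLETE
 *	server: RDMA_READ_ADV -> RDMA_READ_COMPLETE -> RDMA_WRITE_ADV
 *		-> RDMA_WRITE_COMPLETE -> CONNECTED
 */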
157 
158 struct krping_rdma_info {
159 	uint64_t buf;
160 	uint32_t rkey;
161 	uint32_t size;
162 };
163 
164 /*
165  * Default max buffer size for IO...
166  */
167 #define RPING_BUFSIZE (128*1024)
168 #define RPING_SQ_DEPTH 64
169 
170 /*
171  * Control block struct.
172  */
173 struct krping_cb {
174 	int server;			/* 0 iff client */
175 	struct ib_cq *cq;
176 	struct ib_pd *pd;
177 	struct ib_qp *qp;
178 
179 	struct ib_mr *dma_mr;
180 
181 	struct ib_fast_reg_page_list *page_list;
182 	int page_list_len;
183 	struct ib_reg_wr reg_mr_wr;
184 	struct ib_send_wr invalidate_wr;
185 	struct ib_mr *reg_mr;
186 	int server_invalidate;
187 	int read_inv;
188 	u8 key;
189 
190 	struct ib_recv_wr rq_wr;	/* recv work request record */
191 	struct ib_sge recv_sgl;		/* recv single SGE */
192 	struct krping_rdma_info recv_buf __aligned(16);	/* malloc'd buffer */
193 	u64 recv_dma_addr;
194 	DECLARE_PCI_UNMAP_ADDR(recv_mapping)
195 
196 	struct ib_send_wr sq_wr;	/* send work request record */
197 	struct ib_sge send_sgl;
198 	struct krping_rdma_info send_buf __aligned(16); /* single send buf */
199 	u64 send_dma_addr;
200 	DECLARE_PCI_UNMAP_ADDR(send_mapping)
201 
202 	struct ib_rdma_wr rdma_sq_wr;	/* rdma work request record */
203 	struct ib_sge rdma_sgl;		/* rdma single SGE */
204 	char *rdma_buf;			/* used as rdma sink */
205 	u64  rdma_dma_addr;
206 	DECLARE_PCI_UNMAP_ADDR(rdma_mapping)
207 	struct ib_mr *rdma_mr;
208 
209 	uint32_t remote_rkey;		/* remote guy's RKEY */
210 	uint64_t remote_addr;		/* remote guy's TO */
211 	uint32_t remote_len;		/* remote guy's LEN */
212 
213 	char *start_buf;		/* rdma read src */
214 	u64  start_dma_addr;
215 	DECLARE_PCI_UNMAP_ADDR(start_mapping)
216 	struct ib_mr *start_mr;
217 
218 	enum test_state state;		/* used for cond/signalling */
219 	wait_queue_head_t sem;
220 	struct krping_stats stats;
221 
222 	uint16_t port;			/* dst port in NBO */
223 	u8 addr[16] __aligned(8);	/* dst addr in NBO */
224 	char *addr_str;			/* dst addr string */
225 	uint8_t addr_type;		/* ADDR_FAMILY - IPv4/V6 */
226 	int verbose;			/* verbose logging */
227 	int count;			/* ping count */
228 	int size;			/* ping data size */
229 	int validate;			/* validate ping data */
230 	int wlat;			/* run wlat test */
231 	int rlat;			/* run rlat test */
232 	int bw;				/* run bw test */
233 	int duplex;			/* run bw full duplex test */
234 	int poll;			/* poll or block for rlat test */
235 	int txdepth;			/* SQ depth */
236 	int local_dma_lkey;		/* use 0 for lkey */
237 	int frtest;			/* reg test */
238 	int tos;			/* type of service */
239 
240 	/* CM stuff */
241 	struct rdma_cm_id *cm_id;	/* connection on client side,*/
242 					/* listener on server side. */
243 	struct rdma_cm_id *child_cm_id;	/* connection on server side */
244 	struct list_head list;
245 };
246 
247 static int krping_cma_event_handler(struct rdma_cm_id *cma_id,
248 				   struct rdma_cm_event *event)
249 {
250 	int ret;
251 	struct krping_cb *cb = cma_id->context;
252 
253 	DEBUG_LOG("cma_event type %d cma_id %p (%s)\n", event->event, cma_id,
254 		  (cma_id == cb->cm_id) ? "parent" : "child");
255 
256 	switch (event->event) {
257 	case RDMA_CM_EVENT_ADDR_RESOLVED:
258 		cb->state = ADDR_RESOLVED;
259 		ret = rdma_resolve_route(cma_id, 2000);
260 		if (ret) {
261 			printk(KERN_ERR PFX "rdma_resolve_route error %d\n",
262 			       ret);
263 			wake_up_interruptible(&cb->sem);
264 		}
265 		break;
266 
267 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
268 		cb->state = ROUTE_RESOLVED;
269 		wake_up_interruptible(&cb->sem);
270 		break;
271 
272 	case RDMA_CM_EVENT_CONNECT_REQUEST:
273 		cb->state = CONNECT_REQUEST;
274 		cb->child_cm_id = cma_id;
275 		DEBUG_LOG("child cma %p\n", cb->child_cm_id);
276 		wake_up_interruptible(&cb->sem);
277 		break;
278 
279 	case RDMA_CM_EVENT_ESTABLISHED:
280 		DEBUG_LOG("ESTABLISHED\n");
281 		if (!cb->server) {
282 			cb->state = CONNECTED;
283 		}
284 		wake_up_interruptible(&cb->sem);
285 		break;
286 
287 	case RDMA_CM_EVENT_ADDR_ERROR:
288 	case RDMA_CM_EVENT_ROUTE_ERROR:
289 	case RDMA_CM_EVENT_CONNECT_ERROR:
290 	case RDMA_CM_EVENT_UNREACHABLE:
291 	case RDMA_CM_EVENT_REJECTED:
292 		printk(KERN_ERR PFX "cma event %d, error %d\n", event->event,
293 		       event->status);
294 		cb->state = ERROR;
295 		wake_up_interruptible(&cb->sem);
296 		break;
297 
298 	case RDMA_CM_EVENT_DISCONNECTED:
299 		printk(KERN_ERR PFX "DISCONNECT EVENT...\n");
300 		cb->state = ERROR;
301 		wake_up_interruptible(&cb->sem);
302 		break;
303 
304 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
305 		printk(KERN_ERR PFX "cma detected device removal!!!!\n");
306 		cb->state = ERROR;
307 		wake_up_interruptible(&cb->sem);
308 		break;
309 
310 	default:
311 		printk(KERN_ERR PFX "oof bad type!\n");
312 		wake_up_interruptible(&cb->sem);
313 		break;
314 	}
315 	return 0;
316 }
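/*
 * Illustrative sketch of how the handler above pairs with the main
 * threads (see krping_bind_client() and krping_bind_server() below):
 * the CM callback records the new state and wakes cb->sem, and the
 * blocked thread re-checks the state, e.g.
 *
 *	wait_event_interruptible(cb->sem, cb->state >= ROUTE_RESOLVED);
 *	if (cb->state != ROUTE_RESOLVED)
 *		return (-EINTR);
 */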
317 
318 static int server_recv(struct krping_cb *cb, struct ib_wc *wc)
319 {
320 	if (wc->byte_len != sizeof(cb->recv_buf)) {
321 		printk(KERN_ERR PFX "Received bogus data, size %d\n",
322 		       wc->byte_len);
323 		return -1;
324 	}
325 
326 	cb->remote_rkey = ntohl(cb->recv_buf.rkey);
327 	cb->remote_addr = ntohll(cb->recv_buf.buf);
328 	cb->remote_len  = ntohl(cb->recv_buf.size);
329 	DEBUG_LOG("Received rkey %x addr %llx len %d from peer\n",
330 		  cb->remote_rkey, (unsigned long long)cb->remote_addr,
331 		  cb->remote_len);
332 
333 	if (cb->state <= CONNECTED || cb->state == RDMA_WRITE_COMPLETE)
334 		cb->state = RDMA_READ_ADV;
335 	else
336 		cb->state = RDMA_WRITE_ADV;
337 
338 	return 0;
339 }
340 
341 static int client_recv(struct krping_cb *cb, struct ib_wc *wc)
342 {
343 	if (wc->byte_len != sizeof(cb->recv_buf)) {
344 		printk(KERN_ERR PFX "Received bogus data, size %d\n",
345 		       wc->byte_len);
346 		return -1;
347 	}
348 
349 	if (cb->state == RDMA_READ_ADV)
350 		cb->state = RDMA_WRITE_ADV;
351 	else
352 		cb->state = RDMA_WRITE_COMPLETE;
353 
354 	return 0;
355 }
356 
357 static void krping_cq_event_handler(struct ib_cq *cq, void *ctx)
358 {
359 	struct krping_cb *cb = ctx;
360 	struct ib_wc wc;
361 	struct ib_recv_wr *bad_wr;
362 	int ret;
363 
364 	BUG_ON(cb->cq != cq);
365 	if (cb->frtest) {
366 		printk(KERN_ERR PFX "cq completion event in frtest!\n");
367 		return;
368 	}
369 	if (!cb->wlat && !cb->rlat && !cb->bw)
370 		ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
371 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 1) {
372 		if (wc.status) {
373 			if (wc.status == IB_WC_WR_FLUSH_ERR) {
374 				DEBUG_LOG("cq flushed\n");
375 				continue;
376 			} else {
377 				printk(KERN_ERR PFX "cq completion failed with "
378 				       "wr_id %jx status %d opcode %d vendor_err %x\n",
379 					(uintmax_t)wc.wr_id, wc.status, wc.opcode, wc.vendor_err);
380 				goto error;
381 			}
382 		}
383 		if (cb->state == ERROR) {
384 			printk(KERN_ERR PFX "cq completion in ERROR state\n");
385 			return;
386 		}
387 		switch (wc.opcode) {
388 		case IB_WC_SEND:
389 			DEBUG_LOG("send completion\n");
390 			cb->stats.send_bytes += cb->send_sgl.length;
391 			cb->stats.send_msgs++;
392 			break;
393 
394 		case IB_WC_RDMA_WRITE:
395 			DEBUG_LOG("rdma write completion\n");
396 			cb->stats.write_bytes += cb->rdma_sq_wr.wr.sg_list->length;
397 			cb->stats.write_msgs++;
398 			cb->state = RDMA_WRITE_COMPLETE;
399 			wake_up_interruptible(&cb->sem);
400 			break;
401 
402 		case IB_WC_RDMA_READ:
403 			DEBUG_LOG("rdma read completion\n");
404 			cb->stats.read_bytes += cb->rdma_sq_wr.wr.sg_list->length;
405 			cb->stats.read_msgs++;
406 			cb->state = RDMA_READ_COMPLETE;
407 			wake_up_interruptible(&cb->sem);
408 			break;
409 
410 		case IB_WC_RECV:
411 			DEBUG_LOG("recv completion\n");
412 			cb->stats.recv_bytes += sizeof(cb->recv_buf);
413 			cb->stats.recv_msgs++;
414 			if (cb->wlat || cb->rlat || cb->bw)
415 				ret = server_recv(cb, &wc);
416 			else
417 				ret = cb->server ? server_recv(cb, &wc) :
418 						   client_recv(cb, &wc);
419 			if (ret) {
420 				printk(KERN_ERR PFX "recv wc error: %d\n", ret);
421 				goto error;
422 			}
423 
424 			ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
425 			if (ret) {
426 				printk(KERN_ERR PFX "post recv error: %d\n",
427 				       ret);
428 				goto error;
429 			}
430 			wake_up_interruptible(&cb->sem);
431 			break;
432 
433 		default:
434 			printk(KERN_ERR PFX
435 			       "%s:%d Unexpected opcode %d, Shutting down\n",
436 			       __func__, __LINE__, wc.opcode);
437 			goto error;
438 		}
439 	}
440 	if (ret) {
441 		printk(KERN_ERR PFX "poll error %d\n", ret);
442 		goto error;
443 	}
444 	return;
445 error:
446 	cb->state = ERROR;
447 	wake_up_interruptible(&cb->sem);
448 }
449 
450 static int krping_accept(struct krping_cb *cb)
451 {
452 	struct rdma_conn_param conn_param;
453 	int ret;
454 
455 	DEBUG_LOG("accepting client connection request\n");
456 
457 	memset(&conn_param, 0, sizeof conn_param);
458 	conn_param.responder_resources = 1;
459 	conn_param.initiator_depth = 1;
460 
461 	ret = rdma_accept(cb->child_cm_id, &conn_param);
462 	if (ret) {
463 		printk(KERN_ERR PFX "rdma_accept error: %d\n", ret);
464 		return ret;
465 	}
466 
467 	if (!cb->wlat && !cb->rlat && !cb->bw) {
468 		wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
469 		if (cb->state == ERROR) {
470 			printk(KERN_ERR PFX "wait for CONNECTED state %d\n",
471 				cb->state);
472 			return -1;
473 		}
474 	}
475 	return 0;
476 }
477 
478 static void krping_setup_wr(struct krping_cb *cb)
479 {
480 	cb->recv_sgl.addr = cb->recv_dma_addr;
481 	cb->recv_sgl.length = sizeof cb->recv_buf;
482 	cb->recv_sgl.lkey = cb->pd->local_dma_lkey;
483 	cb->rq_wr.sg_list = &cb->recv_sgl;
484 	cb->rq_wr.num_sge = 1;
485 
486 	cb->send_sgl.addr = cb->send_dma_addr;
487 	cb->send_sgl.length = sizeof cb->send_buf;
488 	cb->send_sgl.lkey = cb->pd->local_dma_lkey;
489 
490 	cb->sq_wr.opcode = IB_WR_SEND;
491 	cb->sq_wr.send_flags = IB_SEND_SIGNALED;
492 	cb->sq_wr.sg_list = &cb->send_sgl;
493 	cb->sq_wr.num_sge = 1;
494 
495 	if (cb->server || cb->wlat || cb->rlat || cb->bw) {
496 		cb->rdma_sgl.addr = cb->rdma_dma_addr;
497 		cb->rdma_sq_wr.wr.send_flags = IB_SEND_SIGNALED;
498 		cb->rdma_sq_wr.wr.sg_list = &cb->rdma_sgl;
499 		cb->rdma_sq_wr.wr.num_sge = 1;
500 	}
501 
502 	/*
503 	 * A chain of 2 WRs, INVALIDATE_MR + REG_MR,
504 	 * both unsignaled.  The client uses them to reregister
505 	 * the rdma buffers with a new key each iteration.
506 	 */
507 	cb->reg_mr_wr.wr.opcode = IB_WR_REG_MR;
508 	cb->reg_mr_wr.mr = cb->reg_mr;
509 
510 	cb->invalidate_wr.next = &cb->reg_mr_wr.wr;
511 	cb->invalidate_wr.opcode = IB_WR_LOCAL_INV;
512 }
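/*
 * Illustrative sketch: with the chain built above, posting
 * &cb->invalidate_wr in krping_rdma_rkey() issues
 *
 *	LOCAL_INV(old rkey) -> REG_MR(new rkey)
 *
 * as a single ib_post_send() call; when post_inv is not requested only
 * the REG_MR WR is posted.
 */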
513 
514 static int krping_setup_buffers(struct krping_cb *cb)
515 {
516 	int ret;
517 
518 	DEBUG_LOG(PFX "krping_setup_buffers called on cb %p\n", cb);
519 
520 	cb->recv_dma_addr = ib_dma_map_single(cb->pd->device,
521 				   &cb->recv_buf,
522 				   sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
523 	pci_unmap_addr_set(cb, recv_mapping, cb->recv_dma_addr);
524 	cb->send_dma_addr = ib_dma_map_single(cb->pd->device,
525 					   &cb->send_buf, sizeof(cb->send_buf),
526 					   DMA_BIDIRECTIONAL);
527 	pci_unmap_addr_set(cb, send_mapping, cb->send_dma_addr);
528 
529 	cb->rdma_buf = ib_dma_alloc_coherent(cb->pd->device, cb->size,
530 					     &cb->rdma_dma_addr,
531 					     GFP_KERNEL);
532 	if (!cb->rdma_buf) {
533 		DEBUG_LOG(PFX "rdma_buf allocation failed\n");
534 		ret = -ENOMEM;
535 		goto bail;
536 	}
537 	pci_unmap_addr_set(cb, rdma_mapping, cb->rdma_dma_addr);
538 	cb->page_list_len = (((cb->size - 1) & PAGE_MASK) + PAGE_SIZE)
539 				>> PAGE_SHIFT;
540 	cb->reg_mr = ib_alloc_mr(cb->pd,  IB_MR_TYPE_MEM_REG,
541 				 cb->page_list_len);
542 	if (IS_ERR(cb->reg_mr)) {
543 		ret = PTR_ERR(cb->reg_mr);
544 		DEBUG_LOG(PFX "recv_buf reg_mr failed %d\n", ret);
545 		goto bail;
546 	}
547 	DEBUG_LOG(PFX "reg rkey 0x%x page_list_len %u\n",
548 		cb->reg_mr->rkey, cb->page_list_len);
549 
550 	if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
551 
552 		cb->start_buf = ib_dma_alloc_coherent(cb->pd->device, cb->size,
553 						      &cb->start_dma_addr,
554 						      GFP_KERNEL);
555 		if (!cb->start_buf) {
556 			DEBUG_LOG(PFX "start_buf malloc failed\n");
557 			ret = -ENOMEM;
558 			goto bail;
559 		}
560 		pci_unmap_addr_set(cb, start_mapping, cb->start_dma_addr);
561 	}
562 
563 	krping_setup_wr(cb);
564 	DEBUG_LOG(PFX "allocated & registered buffers...\n");
565 	return 0;
566 bail:
567 	if (cb->reg_mr && !IS_ERR(cb->reg_mr))
568 		ib_dereg_mr(cb->reg_mr);
569 	if (cb->rdma_mr && !IS_ERR(cb->rdma_mr))
570 		ib_dereg_mr(cb->rdma_mr);
571 	if (cb->dma_mr && !IS_ERR(cb->dma_mr))
572 		ib_dereg_mr(cb->dma_mr);
573 	if (cb->rdma_buf) {
574 		ib_dma_free_coherent(cb->pd->device, cb->size, cb->rdma_buf,
575 				     cb->rdma_dma_addr);
576 	}
577 	if (cb->start_buf) {
578 		ib_dma_free_coherent(cb->pd->device, cb->size, cb->start_buf,
579 				     cb->start_dma_addr);
580 	}
581 	return ret;
582 }
583 
584 static void krping_free_buffers(struct krping_cb *cb)
585 {
586 	DEBUG_LOG("krping_free_buffers called on cb %p\n", cb);
587 
588 	if (cb->dma_mr)
589 		ib_dereg_mr(cb->dma_mr);
590 	if (cb->rdma_mr)
591 		ib_dereg_mr(cb->rdma_mr);
592 	if (cb->start_mr)
593 		ib_dereg_mr(cb->start_mr);
594 	if (cb->reg_mr)
595 		ib_dereg_mr(cb->reg_mr);
596 
597 	dma_unmap_single(cb->pd->device->dma_device,
598 			 pci_unmap_addr(cb, recv_mapping),
599 			 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
600 	dma_unmap_single(cb->pd->device->dma_device,
601 			 pci_unmap_addr(cb, send_mapping),
602 			 sizeof(cb->send_buf), DMA_BIDIRECTIONAL);
603 
604 	ib_dma_free_coherent(cb->pd->device, cb->size, cb->rdma_buf,
605 			     cb->rdma_dma_addr);
606 
607 	if (cb->start_buf) {
608 		ib_dma_free_coherent(cb->pd->device, cb->size, cb->start_buf,
609 				     cb->start_dma_addr);
610 	}
611 }
612 
613 static int krping_create_qp(struct krping_cb *cb)
614 {
615 	struct ib_qp_init_attr init_attr;
616 	int ret;
617 
618 	memset(&init_attr, 0, sizeof(init_attr));
619 	init_attr.cap.max_send_wr = cb->txdepth;
620 	init_attr.cap.max_recv_wr = 2;
621 
622 	/* For flush_qp() */
623 	init_attr.cap.max_send_wr++;
624 	init_attr.cap.max_recv_wr++;
625 
626 	init_attr.cap.max_recv_sge = 1;
627 	init_attr.cap.max_send_sge = 1;
628 	init_attr.qp_type = IB_QPT_RC;
629 	init_attr.send_cq = cb->cq;
630 	init_attr.recv_cq = cb->cq;
631 	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
632 
633 	if (cb->server) {
634 		ret = rdma_create_qp(cb->child_cm_id, cb->pd, &init_attr);
635 		if (!ret)
636 			cb->qp = cb->child_cm_id->qp;
637 	} else {
638 		ret = rdma_create_qp(cb->cm_id, cb->pd, &init_attr);
639 		if (!ret)
640 			cb->qp = cb->cm_id->qp;
641 	}
642 
643 	return ret;
644 }
645 
646 static void krping_free_qp(struct krping_cb *cb)
647 {
648 	ib_destroy_qp(cb->qp);
649 	ib_destroy_cq(cb->cq);
650 	ib_dealloc_pd(cb->pd);
651 }
652 
653 static int krping_setup_qp(struct krping_cb *cb, struct rdma_cm_id *cm_id)
654 {
655 	int ret;
656 	struct ib_cq_init_attr attr = {0};
657 
658 	cb->pd = ib_alloc_pd(cm_id->device, 0);
659 	if (IS_ERR(cb->pd)) {
660 		printk(KERN_ERR PFX "ib_alloc_pd failed\n");
661 		return PTR_ERR(cb->pd);
662 	}
663 	DEBUG_LOG("created pd %p\n", cb->pd);
664 
665 	strlcpy(cb->stats.name, cb->pd->device->name, sizeof(cb->stats.name));
666 
667 	attr.cqe = cb->txdepth * 2;
668 	attr.comp_vector = 0;
669 	cb->cq = ib_create_cq(cm_id->device, krping_cq_event_handler, NULL,
670 			      cb, &attr);
671 	if (IS_ERR(cb->cq)) {
672 		printk(KERN_ERR PFX "ib_create_cq failed\n");
673 		ret = PTR_ERR(cb->cq);
674 		goto err1;
675 	}
676 	DEBUG_LOG("created cq %p\n", cb->cq);
677 
678 	if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest) {
679 		ret = ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
680 		if (ret) {
681 			printk(KERN_ERR PFX "ib_req_notify_cq failed\n");
682 			goto err2;
683 		}
684 	}
685 
686 	ret = krping_create_qp(cb);
687 	if (ret) {
688 		printk(KERN_ERR PFX "krping_create_qp failed: %d\n", ret);
689 		goto err2;
690 	}
691 	DEBUG_LOG("created qp %p\n", cb->qp);
692 	return 0;
693 err2:
694 	ib_destroy_cq(cb->cq);
695 err1:
696 	ib_dealloc_pd(cb->pd);
697 	return ret;
698 }
699 
700 /*
701  * Return the (possibly rebound) rkey for the rdma buffer: bump the
702  * fast-reg key, map the buffer into reg_mr, and post the REG_MR WR,
703  * preceded by a LOCAL_INV of the old rkey when post_inv is set.
704  */
705 static u32 krping_rdma_rkey(struct krping_cb *cb, u64 buf, int post_inv)
706 {
707 	u32 rkey;
708 	struct ib_send_wr *bad_wr;
709 	int ret;
710 	struct scatterlist sg = {0};
711 
712 	cb->invalidate_wr.ex.invalidate_rkey = cb->reg_mr->rkey;
713 
714 	/*
715 	 * Update the reg key.
716 	 */
717 	ib_update_fast_reg_key(cb->reg_mr, ++cb->key);
718 	cb->reg_mr_wr.key = cb->reg_mr->rkey;
719 
720 	/*
721 	 * Update the reg WR with new buf info.
722 	 */
723 	if (buf == (u64)cb->start_dma_addr)
724 		cb->reg_mr_wr.access = IB_ACCESS_REMOTE_READ;
725 	else
726 		cb->reg_mr_wr.access = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
727 	sg_dma_address(&sg) = buf;
728 	sg_dma_len(&sg) = cb->size;
729 
730 	ret = ib_map_mr_sg(cb->reg_mr, &sg, 1, NULL, PAGE_SIZE);
731 	BUG_ON(ret <= 0 || ret > cb->page_list_len);
732 
733 	DEBUG_LOG(PFX "post_inv = %d, reg_mr new rkey 0x%x pgsz %u len %u"
734 		" iova_start %llx\n",
735 		post_inv,
736 		cb->reg_mr_wr.key,
737 		cb->reg_mr->page_size,
738 		(unsigned)cb->reg_mr->length,
739 	        (unsigned long long)cb->reg_mr->iova);
740 
741 	if (post_inv)
742 		ret = ib_post_send(cb->qp, &cb->invalidate_wr, &bad_wr);
743 	else
744 		ret = ib_post_send(cb->qp, &cb->reg_mr_wr.wr, &bad_wr);
745 	if (ret) {
746 		printk(KERN_ERR PFX "post send error %d\n", ret);
747 		cb->state = ERROR;
748 	}
749 	rkey = cb->reg_mr->rkey;
750 	return rkey;
751 }
752 
753 static void krping_format_send(struct krping_cb *cb, u64 buf)
754 {
755 	struct krping_rdma_info *info = &cb->send_buf;
756 	u32 rkey;
757 
758 	/*
759 	 * Client side will do reg or mw bind before
760 	 * advertising the rdma buffer.  Server side
761 	 * sends have no data.
762 	 */
763 	if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
764 		rkey = krping_rdma_rkey(cb, buf, !cb->server_invalidate);
765 		info->buf = htonll(buf);
766 		info->rkey = htonl(rkey);
767 		info->size = htonl(cb->size);
768 		DEBUG_LOG("RDMA addr %llx rkey %x len %d\n",
769 			  (unsigned long long)buf, rkey, cb->size);
770 	}
771 }
772 
773 static void krping_test_server(struct krping_cb *cb)
774 {
775 	struct ib_send_wr *bad_wr, inv;
776 	int ret;
777 
778 	while (1) {
779 		/* Wait for client's Start STAG/TO/Len */
780 		wait_event_interruptible(cb->sem, cb->state >= RDMA_READ_ADV);
781 		if (cb->state != RDMA_READ_ADV) {
782 			printk(KERN_ERR PFX "wait for RDMA_READ_ADV state %d\n",
783 				cb->state);
784 			break;
785 		}
786 
787 		DEBUG_LOG("server received sink adv\n");
788 
789 		cb->rdma_sq_wr.rkey = cb->remote_rkey;
790 		cb->rdma_sq_wr.remote_addr = cb->remote_addr;
791 		cb->rdma_sq_wr.wr.sg_list->length = cb->remote_len;
792 		cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, !cb->read_inv);
793 		cb->rdma_sq_wr.wr.next = NULL;
794 
795 		/* Issue RDMA Read. */
796 		if (cb->read_inv)
797 			cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
798 		else {
799 
800 			cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ;
801 			/*
802 			 * Immediately follow the read with a
803 			 * fenced LOCAL_INV.
804 			 */
805 			cb->rdma_sq_wr.wr.next = &inv;
806 			memset(&inv, 0, sizeof inv);
807 			inv.opcode = IB_WR_LOCAL_INV;
808 			inv.ex.invalidate_rkey = cb->reg_mr->rkey;
809 			inv.send_flags = IB_SEND_FENCE;
810 		}
811 
812 		ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
813 		if (ret) {
814 			printk(KERN_ERR PFX "post send error %d\n", ret);
815 			break;
816 		}
817 		cb->rdma_sq_wr.wr.next = NULL;
818 
819 		DEBUG_LOG("server posted rdma read req\n");
820 
821 		/* Wait for read completion */
822 		wait_event_interruptible(cb->sem,
823 					 cb->state >= RDMA_READ_COMPLETE);
824 		if (cb->state != RDMA_READ_COMPLETE) {
825 			printk(KERN_ERR PFX
826 			       "wait for RDMA_READ_COMPLETE state %d\n",
827 			       cb->state);
828 			break;
829 		}
830 		DEBUG_LOG("server received read complete\n");
831 
832 		/* Display data in recv buf */
833 		if (cb->verbose)
834 			printk(KERN_INFO PFX "server ping data: %s\n",
835 				cb->rdma_buf);
836 
837 		/* Tell client to continue */
838 		if (cb->server && cb->server_invalidate) {
839 			cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
840 			cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
841 			DEBUG_LOG("send-w-inv rkey 0x%x\n", cb->remote_rkey);
842 		}
843 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
844 		if (ret) {
845 			printk(KERN_ERR PFX "post send error %d\n", ret);
846 			break;
847 		}
848 		DEBUG_LOG("server posted go ahead\n");
849 
850 		/* Wait for client's RDMA STAG/TO/Len */
851 		wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
852 		if (cb->state != RDMA_WRITE_ADV) {
853 			printk(KERN_ERR PFX
854 			       "wait for RDMA_WRITE_ADV state %d\n",
855 			       cb->state);
856 			break;
857 		}
858 		DEBUG_LOG("server received sink adv\n");
859 
860 		/* RDMA Write echo data */
861 		cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
862 		cb->rdma_sq_wr.rkey = cb->remote_rkey;
863 		cb->rdma_sq_wr.remote_addr = cb->remote_addr;
864 		cb->rdma_sq_wr.wr.sg_list->length = strlen(cb->rdma_buf) + 1;
865 		if (cb->local_dma_lkey)
866 			cb->rdma_sgl.lkey = cb->pd->local_dma_lkey;
867 		else
868 			cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, 0);
869 
870 		DEBUG_LOG("rdma write from lkey %x laddr %llx len %d\n",
871 			  cb->rdma_sq_wr.wr.sg_list->lkey,
872 			  (unsigned long long)cb->rdma_sq_wr.wr.sg_list->addr,
873 			  cb->rdma_sq_wr.wr.sg_list->length);
874 
875 		ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
876 		if (ret) {
877 			printk(KERN_ERR PFX "post send error %d\n", ret);
878 			break;
879 		}
880 
881 		/* Wait for completion */
882 		ret = wait_event_interruptible(cb->sem, cb->state >=
883 							 RDMA_WRITE_COMPLETE);
884 		if (cb->state != RDMA_WRITE_COMPLETE) {
885 			printk(KERN_ERR PFX
886 			       "wait for RDMA_WRITE_COMPLETE state %d\n",
887 			       cb->state);
888 			break;
889 		}
890 		DEBUG_LOG("server rdma write complete\n");
891 
892 		cb->state = CONNECTED;
893 
894 		/* Tell client to begin again */
895 		if (cb->server && cb->server_invalidate) {
896 			cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
897 			cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
898 			DEBUG_LOG("send-w-inv rkey 0x%x\n", cb->remote_rkey);
899 		}
900 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
901 		if (ret) {
902 			printk(KERN_ERR PFX "post send error %d\n", ret);
903 			break;
904 		}
905 		DEBUG_LOG("server posted go ahead\n");
906 	}
907 }
908 
909 static void rlat_test(struct krping_cb *cb)
910 {
911 	int scnt;
912 	int iters = cb->count;
913 	struct timeval start_tv, stop_tv;
914 	int ret;
915 	struct ib_wc wc;
916 	struct ib_send_wr *bad_wr;
917 	int ne;
918 
919 	scnt = 0;
920 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ;
921 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
922 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
923 	cb->rdma_sq_wr.wr.sg_list->length = cb->size;
924 
925 	microtime(&start_tv);
926 	if (!cb->poll) {
927 		cb->state = RDMA_READ_ADV;
928 		ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
929 	}
930 	while (scnt < iters) {
931 
932 		cb->state = RDMA_READ_ADV;
933 		ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
934 		if (ret) {
935 			printk(KERN_ERR PFX
936 				"Couldn't post send: ret=%d scnt %d\n",
937 				ret, scnt);
938 			return;
939 		}
940 
941 		do {
942 			if (!cb->poll) {
943 				wait_event_interruptible(cb->sem,
944 					cb->state != RDMA_READ_ADV);
945 				if (cb->state == RDMA_READ_COMPLETE) {
946 					ne = 1;
947 					ib_req_notify_cq(cb->cq,
948 						IB_CQ_NEXT_COMP);
949 				} else {
950 					ne = -1;
951 				}
952 			} else
953 				ne = ib_poll_cq(cb->cq, 1, &wc);
954 			if (cb->state == ERROR) {
955 				printk(KERN_ERR PFX
956 					"state == ERROR...bailing scnt %d\n",
957 					scnt);
958 				return;
959 			}
960 		} while (ne == 0);
961 
962 		if (ne < 0) {
963 			printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
964 			return;
965 		}
966 		if (cb->poll && wc.status != IB_WC_SUCCESS) {
967 			printk(KERN_ERR PFX "Completion with error at %s:\n",
968 				cb->server ? "server" : "client");
969 			printk(KERN_ERR PFX "Failed status %d: wr_id %d\n",
970 				wc.status, (int) wc.wr_id);
971 			return;
972 		}
973 		++scnt;
974 	}
975 	microtime(&stop_tv);
976 
977 	if (stop_tv.tv_usec < start_tv.tv_usec) {
978 		stop_tv.tv_usec += 1000000;
979 		stop_tv.tv_sec  -= 1;
980 	}
981 
982 	printk(KERN_ERR PFX "delta sec %lu delta usec %lu iter %d size %d\n",
983 		(unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
984 		(unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
985 		scnt, cb->size);
986 }
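/*
 * Example (illustrative): the average read latency for the run above is
 * left to whoever reads the log, e.g.
 *
 *	lat_usec = (delta_sec * 1000000.0 + delta_usec) / scnt;
 */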
987 
988 static void wlat_test(struct krping_cb *cb)
989 {
990 	int ccnt, scnt, rcnt;
991 	int iters=cb->count;
992 	volatile char *poll_buf = (char *) cb->start_buf;
993 	char *buf = (char *)cb->rdma_buf;
994 	struct timeval start_tv, stop_tv;
995 	cycles_t *post_cycles_start, *post_cycles_stop;
996 	cycles_t *poll_cycles_start, *poll_cycles_stop;
997 	cycles_t *last_poll_cycles_start;
998 	cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
999 	int i;
1000 	int cycle_iters = 1000;
1001 
1002 	ccnt = 0;
1003 	scnt = 0;
1004 	rcnt = 0;
1005 
1006 	post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1007 	if (!post_cycles_start) {
1008 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1009 		return;
1010 	}
1011 	post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1012 	if (!post_cycles_stop) {
1013 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1014 		return;
1015 	}
1016 	poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1017 	if (!poll_cycles_start) {
1018 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1019 		return;
1020 	}
1021 	poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1022 	if (!poll_cycles_stop) {
1023 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1024 		return;
1025 	}
1026 	last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1027 		GFP_KERNEL);
1028 	if (!last_poll_cycles_start) {
1029 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1030 		return;
1031 	}
1032 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1033 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
1034 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1035 	cb->rdma_sq_wr.wr.sg_list->length = cb->size;
1036 
1037 	if (cycle_iters > iters)
1038 		cycle_iters = iters;
1039 	microtime(&start_tv);
1040 	while (scnt < iters || ccnt < iters || rcnt < iters) {
1041 
1042 		/* Wait till buffer changes. */
1043 		if (rcnt < iters && !(scnt < 1 && !cb->server)) {
1044 			++rcnt;
1045 			while (*poll_buf != (char)rcnt) {
1046 				if (cb->state == ERROR) {
1047 					printk(KERN_ERR PFX
1048 						"state = ERROR, bailing\n");
1049 					return;
1050 				}
1051 			}
1052 		}
1053 
1054 		if (scnt < iters) {
1055 			struct ib_send_wr *bad_wr;
1056 
1057 			*buf = (char)scnt+1;
1058 			if (scnt < cycle_iters)
1059 				post_cycles_start[scnt] = get_cycles();
1060 			if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1061 				printk(KERN_ERR PFX
1062 					"Couldn't post send: scnt=%d\n",
1063 					scnt);
1064 				return;
1065 			}
1066 			if (scnt < cycle_iters)
1067 				post_cycles_stop[scnt] = get_cycles();
1068 			scnt++;
1069 		}
1070 
1071 		if (ccnt < iters) {
1072 			struct ib_wc wc;
1073 			int ne;
1074 
1075 			if (ccnt < cycle_iters)
1076 				poll_cycles_start[ccnt] = get_cycles();
1077 			do {
1078 				if (ccnt < cycle_iters)
1079 					last_poll_cycles_start[ccnt] =
1080 						get_cycles();
1081 				ne = ib_poll_cq(cb->cq, 1, &wc);
1082 			} while (ne == 0);
1083 			if (ccnt < cycle_iters)
1084 				poll_cycles_stop[ccnt] = get_cycles();
1085 			++ccnt;
1086 
1087 			if (ne < 0) {
1088 				printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1089 				return;
1090 			}
1091 			if (wc.status != IB_WC_SUCCESS) {
1092 				printk(KERN_ERR PFX
1093 					"Completion with error at %s:\n",
1094 					cb->server ? "server" : "client");
1095 				printk(KERN_ERR PFX
1096 					"Failed status %d: wr_id %d\n",
1097 					wc.status, (int) wc.wr_id);
1098 				printk(KERN_ERR PFX
1099 					"scnt=%d, rcnt=%d, ccnt=%d\n",
1100 					scnt, rcnt, ccnt);
1101 				return;
1102 			}
1103 		}
1104 	}
1105 	microtime(&stop_tv);
1106 
1107 	if (stop_tv.tv_usec < start_tv.tv_usec) {
1108 		stop_tv.tv_usec += 1000000;
1109 		stop_tv.tv_sec  -= 1;
1110 	}
1111 
1112 	for (i=0; i < cycle_iters; i++) {
1113 		sum_post += post_cycles_stop[i] - post_cycles_start[i];
1114 		sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1115 		sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1116 	}
1117 	printk(KERN_ERR PFX
1118 		"delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1119 		" sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1120 		(unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
1121 		(unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
1122 		scnt, cb->size, cycle_iters,
1123 		(unsigned long long)sum_post, (unsigned long long)sum_poll,
1124 		(unsigned long long)sum_last_poll);
1125 	kfree(post_cycles_start);
1126 	kfree(post_cycles_stop);
1127 	kfree(poll_cycles_start);
1128 	kfree(poll_cycles_stop);
1129 	kfree(last_poll_cycles_start);
1130 }
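/*
 * Example (illustrative): the cycle sums printed above are totals over
 * cycle_iters samples, so the average per-operation cost in TSC ticks
 * would be
 *
 *	avg_post = sum_post / cycle_iters;
 *	avg_poll = sum_poll / cycle_iters;
 */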
1131 
1132 static void bw_test(struct krping_cb *cb)
1133 {
1134 	int ccnt, scnt, rcnt;
1135 	int iters=cb->count;
1136 	struct timeval start_tv, stop_tv;
1137 	cycles_t *post_cycles_start, *post_cycles_stop;
1138 	cycles_t *poll_cycles_start, *poll_cycles_stop;
1139 	cycles_t *last_poll_cycles_start;
1140 	cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
1141 	int i;
1142 	int cycle_iters = 1000;
1143 
1144 	ccnt = 0;
1145 	scnt = 0;
1146 	rcnt = 0;
1147 
1148 	post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1149 	if (!post_cycles_start) {
1150 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1151 		return;
1152 	}
1153 	post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1154 	if (!post_cycles_stop) {
1155 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1156 		return;
1157 	}
1158 	poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1159 	if (!poll_cycles_start) {
1160 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1161 		return;
1162 	}
1163 	poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1164 	if (!poll_cycles_stop) {
1165 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1166 		return;
1167 	}
1168 	last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1169 		GFP_KERNEL);
1170 	if (!last_poll_cycles_start) {
1171 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1172 		return;
1173 	}
1174 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1175 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
1176 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1177 	cb->rdma_sq_wr.wr.sg_list->length = cb->size;
1178 
1179 	if (cycle_iters > iters)
1180 		cycle_iters = iters;
1181 	microtime(&start_tv);
1182 	while (scnt < iters || ccnt < iters) {
1183 
1184 		while (scnt < iters && scnt - ccnt < cb->txdepth) {
1185 			struct ib_send_wr *bad_wr;
1186 
1187 			if (scnt < cycle_iters)
1188 				post_cycles_start[scnt] = get_cycles();
1189 			if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1190 				printk(KERN_ERR PFX
1191 					"Couldn't post send: scnt=%d\n",
1192 					scnt);
1193 				return;
1194 			}
1195 			if (scnt < cycle_iters)
1196 				post_cycles_stop[scnt] = get_cycles();
1197 			++scnt;
1198 		}
1199 
1200 		if (ccnt < iters) {
1201 			int ne;
1202 			struct ib_wc wc;
1203 
1204 			if (ccnt < cycle_iters)
1205 				poll_cycles_start[ccnt] = get_cycles();
1206 			do {
1207 				if (ccnt < cycle_iters)
1208 					last_poll_cycles_start[ccnt] =
1209 						get_cycles();
1210 				ne = ib_poll_cq(cb->cq, 1, &wc);
1211 			} while (ne == 0);
1212 			if (ccnt < cycle_iters)
1213 				poll_cycles_stop[ccnt] = get_cycles();
1214 			ccnt += 1;
1215 
1216 			if (ne < 0) {
1217 				printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1218 				return;
1219 			}
1220 			if (wc.status != IB_WC_SUCCESS) {
1221 				printk(KERN_ERR PFX
1222 					"Completion with error at %s:\n",
1223 					cb->server ? "server" : "client");
1224 				printk(KERN_ERR PFX
1225 					"Failed status %d: wr_id %d\n",
1226 					wc.status, (int) wc.wr_id);
1227 				return;
1228 			}
1229 		}
1230 	}
1231 	microtime(&stop_tv);
1232 
1233 	if (stop_tv.tv_usec < start_tv.tv_usec) {
1234 		stop_tv.tv_usec += 1000000;
1235 		stop_tv.tv_sec  -= 1;
1236 	}
1237 
1238 	for (i=0; i < cycle_iters; i++) {
1239 		sum_post += post_cycles_stop[i] - post_cycles_start[i];
1240 		sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1241 		sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1242 	}
1243 	printk(KERN_ERR PFX
1244 		"delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1245 		" sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1246 		(unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
1247 		(unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
1248 		scnt, cb->size, cycle_iters,
1249 		(unsigned long long)sum_post, (unsigned long long)sum_poll,
1250 		(unsigned long long)sum_last_poll);
1251 	kfree(post_cycles_start);
1252 	kfree(post_cycles_stop);
1253 	kfree(poll_cycles_start);
1254 	kfree(poll_cycles_stop);
1255 	kfree(last_poll_cycles_start);
1256 }
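/*
 * Example (illustrative): bandwidth follows from the values logged above,
 * since size bytes are written per iteration:
 *
 *	MB_per_sec = (size * (u64)iter) /
 *	    (delta_sec * 1000000.0 + delta_usec);
 *
 * (bytes per microsecond is numerically MB/s); the module itself only
 * logs the raw numbers.
 */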
1257 
1258 static void krping_rlat_test_server(struct krping_cb *cb)
1259 {
1260 	struct ib_send_wr *bad_wr;
1261 	struct ib_wc wc;
1262 	int ret;
1263 
1264 	/* Spin waiting for client's Start STAG/TO/Len */
1265 	while (cb->state < RDMA_READ_ADV) {
1266 		krping_cq_event_handler(cb->cq, cb);
1267 	}
1268 
1269 	/* Send STAG/TO/Len to client */
1270 	krping_format_send(cb, cb->start_dma_addr);
1271 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1272 	if (ret) {
1273 		printk(KERN_ERR PFX "post send error %d\n", ret);
1274 		return;
1275 	}
1276 
1277 	/* Spin waiting for send completion */
1278 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1279 	if (ret < 0) {
1280 		printk(KERN_ERR PFX "poll error %d\n", ret);
1281 		return;
1282 	}
1283 	if (wc.status) {
1284 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1285 		return;
1286 	}
1287 
1288 	wait_event_interruptible(cb->sem, cb->state == ERROR);
1289 }
1290 
1291 static void krping_wlat_test_server(struct krping_cb *cb)
1292 {
1293 	struct ib_send_wr *bad_wr;
1294 	struct ib_wc wc;
1295 	int ret;
1296 
1297 	/* Spin waiting for client's Start STAG/TO/Len */
1298 	while (cb->state < RDMA_READ_ADV) {
1299 		krping_cq_event_handler(cb->cq, cb);
1300 	}
1301 
1302 	/* Send STAG/TO/Len to client */
1303 	krping_format_send(cb, cb->start_dma_addr);
1304 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1305 	if (ret) {
1306 		printk(KERN_ERR PFX "post send error %d\n", ret);
1307 		return;
1308 	}
1309 
1310 	/* Spin waiting for send completion */
1311 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1312 	if (ret < 0) {
1313 		printk(KERN_ERR PFX "poll error %d\n", ret);
1314 		return;
1315 	}
1316 	if (wc.status) {
1317 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1318 		return;
1319 	}
1320 
1321 	wlat_test(cb);
1322 	wait_event_interruptible(cb->sem, cb->state == ERROR);
1323 }
1324 
1325 static void krping_bw_test_server(struct krping_cb *cb)
1326 {
1327 	struct ib_send_wr *bad_wr;
1328 	struct ib_wc wc;
1329 	int ret;
1330 
1331 	/* Spin waiting for client's Start STAG/TO/Len */
1332 	while (cb->state < RDMA_READ_ADV) {
1333 		krping_cq_event_handler(cb->cq, cb);
1334 	}
1335 
1336 	/* Send STAG/TO/Len to client */
1337 	krping_format_send(cb, cb->start_dma_addr);
1338 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1339 	if (ret) {
1340 		printk(KERN_ERR PFX "post send error %d\n", ret);
1341 		return;
1342 	}
1343 
1344 	/* Spin waiting for send completion */
1345 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1346 	if (ret < 0) {
1347 		printk(KERN_ERR PFX "poll error %d\n", ret);
1348 		return;
1349 	}
1350 	if (wc.status) {
1351 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1352 		return;
1353 	}
1354 
1355 	if (cb->duplex)
1356 		bw_test(cb);
1357 	wait_event_interruptible(cb->sem, cb->state == ERROR);
1358 }
1359 
1360 static int reg_supported(struct ib_device *dev)
1361 {
1362 	u64 needed_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
1363 
1364 	if ((dev->attrs.device_cap_flags & needed_flags) != needed_flags) {
1365 		printk(KERN_ERR PFX
1366 			"Fastreg not supported - device_cap_flags 0x%llx\n",
1367 			(unsigned long long)dev->attrs.device_cap_flags);
1368 		return 0;
1369 	}
1370 	DEBUG_LOG("Fastreg supported - device_cap_flags 0x%llx\n",
1371 		(unsigned long long)dev->attrs.device_cap_flags);
1372 	return 1;
1373 }
1374 
1375 static void fill_sockaddr(struct sockaddr_storage *sin, struct krping_cb *cb)
1376 {
1377 	memset(sin, 0, sizeof(*sin));
1378 
1379 	if (cb->addr_type == AF_INET) {
1380 		struct sockaddr_in *sin4 = (struct sockaddr_in *)sin;
1381 		sin4->sin_len = sizeof(*sin4);
1382 		sin4->sin_family = AF_INET;
1383 		memcpy((void *)&sin4->sin_addr.s_addr, cb->addr, 4);
1384 		sin4->sin_port = cb->port;
1385 	} else if (cb->addr_type == AF_INET6) {
1386 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sin;
1387 		sin6->sin6_len = sizeof(*sin6);
1388 		sin6->sin6_family = AF_INET6;
1389 		memcpy((void *)&sin6->sin6_addr, cb->addr, 16);
1390 		sin6->sin6_port = cb->port;
1391 	}
1392 }
1393 
1394 static int krping_bind_server(struct krping_cb *cb)
1395 {
1396 	struct sockaddr_storage sin;
1397 	int ret;
1398 
1399 
1400 	fill_sockaddr(&sin, cb);
1401 
1402 	ret = rdma_bind_addr(cb->cm_id, (struct sockaddr *)&sin);
1403 	if (ret) {
1404 		printk(KERN_ERR PFX "rdma_bind_addr error %d\n", ret);
1405 		return ret;
1406 	}
1407 	DEBUG_LOG("rdma_bind_addr successful\n");
1408 
1409 	DEBUG_LOG("rdma_listen\n");
1410 	ret = rdma_listen(cb->cm_id, 3);
1411 	if (ret) {
1412 		printk(KERN_ERR PFX "rdma_listen failed: %d\n", ret);
1413 		return ret;
1414 	}
1415 
1416 	wait_event_interruptible(cb->sem, cb->state >= CONNECT_REQUEST);
1417 	if (cb->state != CONNECT_REQUEST) {
1418 		printk(KERN_ERR PFX "wait for CONNECT_REQUEST state %d\n",
1419 			cb->state);
1420 		return -1;
1421 	}
1422 
1423 	if (!reg_supported(cb->child_cm_id->device))
1424 		return -EINVAL;
1425 
1426 	return 0;
1427 }
1428 
1429 static void krping_run_server(struct krping_cb *cb)
1430 {
1431 	struct ib_recv_wr *bad_wr;
1432 	int ret;
1433 
1434 	ret = krping_bind_server(cb);
1435 	if (ret)
1436 		return;
1437 
1438 	ret = krping_setup_qp(cb, cb->child_cm_id);
1439 	if (ret) {
1440 		printk(KERN_ERR PFX "setup_qp failed: %d\n", ret);
1441 		goto err0;
1442 	}
1443 
1444 	ret = krping_setup_buffers(cb);
1445 	if (ret) {
1446 		printk(KERN_ERR PFX "krping_setup_buffers failed: %d\n", ret);
1447 		goto err1;
1448 	}
1449 
1450 	ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
1451 	if (ret) {
1452 		printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
1453 		goto err2;
1454 	}
1455 
1456 	ret = krping_accept(cb);
1457 	if (ret) {
1458 		printk(KERN_ERR PFX "connect error %d\n", ret);
1459 		goto err2;
1460 	}
1461 
1462 	if (cb->wlat)
1463 		krping_wlat_test_server(cb);
1464 	else if (cb->rlat)
1465 		krping_rlat_test_server(cb);
1466 	else if (cb->bw)
1467 		krping_bw_test_server(cb);
1468 	else
1469 		krping_test_server(cb);
1470 	rdma_disconnect(cb->child_cm_id);
1471 err2:
1472 	krping_free_buffers(cb);
1473 err1:
1474 	krping_free_qp(cb);
1475 err0:
1476 	rdma_destroy_id(cb->child_cm_id);
1477 }
1478 
1479 static void krping_test_client(struct krping_cb *cb)
1480 {
1481 	int ping, start, cc, i, ret;
1482 	struct ib_send_wr *bad_wr;
1483 	unsigned char c;
1484 
1485 	start = 65;
1486 	for (ping = 0; !cb->count || ping < cb->count; ping++) {
1487 		cb->state = RDMA_READ_ADV;
1488 
1489 		/* Put some ascii text in the buffer. */
1490 		cc = sprintf(cb->start_buf, "rdma-ping-%d: ", ping);
1491 		for (i = cc, c = start; i < cb->size; i++) {
1492 			cb->start_buf[i] = c;
1493 			c++;
1494 			if (c > 122)
1495 				c = 65;
1496 		}
1497 		start++;
1498 		if (start > 122)
1499 			start = 65;
1500 		cb->start_buf[cb->size - 1] = 0;
1501 
1502 		krping_format_send(cb, cb->start_dma_addr);
1503 		if (cb->state == ERROR) {
1504 			printk(KERN_ERR PFX "krping_format_send failed\n");
1505 			break;
1506 		}
1507 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1508 		if (ret) {
1509 			printk(KERN_ERR PFX "post send error %d\n", ret);
1510 			break;
1511 		}
1512 
1513 		/* Wait for server to ACK */
1514 		wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
1515 		if (cb->state != RDMA_WRITE_ADV) {
1516 			printk(KERN_ERR PFX
1517 			       "wait for RDMA_WRITE_ADV state %d\n",
1518 			       cb->state);
1519 			break;
1520 		}
1521 
1522 		krping_format_send(cb, cb->rdma_dma_addr);
1523 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1524 		if (ret) {
1525 			printk(KERN_ERR PFX "post send error %d\n", ret);
1526 			break;
1527 		}
1528 
1529 		/* Wait for the server to say the RDMA Write is complete. */
1530 		wait_event_interruptible(cb->sem,
1531 					 cb->state >= RDMA_WRITE_COMPLETE);
1532 		if (cb->state != RDMA_WRITE_COMPLETE) {
1533 			printk(KERN_ERR PFX
1534 			       "wait for RDMA_WRITE_COMPLETE state %d\n",
1535 			       cb->state);
1536 			break;
1537 		}
1538 
1539 		if (cb->validate)
1540 			if (memcmp(cb->start_buf, cb->rdma_buf, cb->size)) {
1541 				printk(KERN_ERR PFX "data mismatch!\n");
1542 				break;
1543 			}
1544 
1545 		if (cb->verbose)
1546 			printk(KERN_INFO PFX "ping data: %s\n", cb->rdma_buf);
1547 #ifdef SLOW_KRPING
1548 		wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
1549 #endif
1550 	}
1551 }
1552 
1553 static void krping_rlat_test_client(struct krping_cb *cb)
1554 {
1555 	struct ib_send_wr *bad_wr;
1556 	struct ib_wc wc;
1557 	int ret;
1558 
1559 	cb->state = RDMA_READ_ADV;
1560 
1561 	/* Send STAG/TO/Len to client */
1562 	krping_format_send(cb, cb->start_dma_addr);
1563 	if (cb->state == ERROR) {
1564 		printk(KERN_ERR PFX "krping_format_send failed\n");
1565 		return;
1566 	}
1567 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1568 	if (ret) {
1569 		printk(KERN_ERR PFX "post send error %d\n", ret);
1570 		return;
1571 	}
1572 
1573 	/* Spin waiting for send completion */
1574 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1575 	if (ret < 0) {
1576 		printk(KERN_ERR PFX "poll error %d\n", ret);
1577 		return;
1578 	}
1579 	if (wc.status) {
1580 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1581 		return;
1582 	}
1583 
1584 	/* Spin waiting for server's Start STAG/TO/Len */
1585 	while (cb->state < RDMA_WRITE_ADV) {
1586 		krping_cq_event_handler(cb->cq, cb);
1587 	}
1588 
1589 #if 0
1590 {
1591 	int i;
1592 	struct timeval start, stop;
1593 	time_t sec;
1594 	suseconds_t usec;
1595 	unsigned long long elapsed;
1596 	struct ib_wc wc;
1597 	struct ib_send_wr *bad_wr;
1598 	int ne;
1599 
1600 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1601 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
1602 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1603 	cb->rdma_sq_wr.wr.sg_list->length = 0;
1604 	cb->rdma_sq_wr.wr.num_sge = 0;
1605 
1606 	microtime(&start);
1607 	for (i=0; i < 100000; i++) {
1608 		if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1609 			printk(KERN_ERR PFX  "Couldn't post send\n");
1610 			return;
1611 		}
1612 		do {
1613 			ne = ib_poll_cq(cb->cq, 1, &wc);
1614 		} while (ne == 0);
1615 		if (ne < 0) {
1616 			printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1617 			return;
1618 		}
1619 		if (wc.status != IB_WC_SUCCESS) {
1620 			printk(KERN_ERR PFX "Completion with error at %s:\n",
1621 				cb->server ? "server" : "client");
1622 			printk(KERN_ERR PFX "Failed status %d: wr_id %d\n",
1623 				wc.status, (int) wc.wr_id);
1624 			return;
1625 		}
1626 	}
1627 	microtime(&stop);
1628 
1629 	if (stop.tv_usec < start.tv_usec) {
1630 		stop.tv_usec += 1000000;
1631 		stop.tv_sec  -= 1;
1632 	}
1633 	sec     = stop.tv_sec - start.tv_sec;
1634 	usec    = stop.tv_usec - start.tv_usec;
1635 	elapsed = sec * 1000000 + usec;
1636 	printk(KERN_ERR PFX "0B-write-lat iters 100000 usec %llu\n", elapsed);
1637 }
1638 #endif
1639 
1640 	rlat_test(cb);
1641 }
1642 
1643 static void krping_wlat_test_client(struct krping_cb *cb)
1644 {
1645 	struct ib_send_wr *bad_wr;
1646 	struct ib_wc wc;
1647 	int ret;
1648 
1649 	cb->state = RDMA_READ_ADV;
1650 
1651 	/* Send STAG/TO/Len to client */
1652 	krping_format_send(cb, cb->start_dma_addr);
1653 	if (cb->state == ERROR) {
1654 		printk(KERN_ERR PFX "krping_format_send failed\n");
1655 		return;
1656 	}
1657 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1658 	if (ret) {
1659 		printk(KERN_ERR PFX "post send error %d\n", ret);
1660 		return;
1661 	}
1662 
1663 	/* Spin waiting for send completion */
1664 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1665 	if (ret < 0) {
1666 		printk(KERN_ERR PFX "poll error %d\n", ret);
1667 		return;
1668 	}
1669 	if (wc.status) {
1670 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1671 		return;
1672 	}
1673 
1674 	/* Spin waiting for server's Start STAG/TO/Len */
1675 	while (cb->state < RDMA_WRITE_ADV) {
1676 		krping_cq_event_handler(cb->cq, cb);
1677 	}
1678 
1679 	wlat_test(cb);
1680 }
1681 
1682 static void krping_bw_test_client(struct krping_cb *cb)
1683 {
1684 	struct ib_send_wr *bad_wr;
1685 	struct ib_wc wc;
1686 	int ret;
1687 
1688 	cb->state = RDMA_READ_ADV;
1689 
1690 	/* Send STAG/TO/Len to client */
1691 	krping_format_send(cb, cb->start_dma_addr);
1692 	if (cb->state == ERROR) {
1693 		printk(KERN_ERR PFX "krping_format_send failed\n");
1694 		return;
1695 	}
1696 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1697 	if (ret) {
1698 		printk(KERN_ERR PFX "post send error %d\n", ret);
1699 		return;
1700 	}
1701 
1702 	/* Spin waiting for send completion */
1703 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1704 	if (ret < 0) {
1705 		printk(KERN_ERR PFX "poll error %d\n", ret);
1706 		return;
1707 	}
1708 	if (wc.status) {
1709 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1710 		return;
1711 	}
1712 
1713 	/* Spin waiting for server's Start STAG/TO/Len */
1714 	while (cb->state < RDMA_WRITE_ADV) {
1715 		krping_cq_event_handler(cb->cq, cb);
1716 	}
1717 
1718 	bw_test(cb);
1719 }
1720 
1721 /*
1722  * Manual qp flush test
1723  */
1724 static void flush_qp(struct krping_cb *cb)
1725 {
1726 	struct ib_send_wr wr = { 0 }, *bad;
1727 	struct ib_recv_wr recv_wr = { 0 }, *recv_bad;
1728 	struct ib_wc wc;
1729 	int ret;
1730 	int flushed = 0;
1731 	int ccnt = 0;
1732 
1733 	rdma_disconnect(cb->cm_id);
1734 	DEBUG_LOG("disconnected!\n");
1735 
1736 	wr.opcode = IB_WR_SEND;
1737 	wr.wr_id = 0xdeadbeefcafebabe;
1738 	ret = ib_post_send(cb->qp, &wr, &bad);
1739 	if (ret) {
1740 		printk(KERN_ERR PFX "%s post_send failed ret %d\n", __func__, ret);
1741 		return;
1742 	}
1743 
1744 	recv_wr.wr_id = 0xcafebabedeadbeef;
1745 	ret = ib_post_recv(cb->qp, &recv_wr, &recv_bad);
1746 	if (ret) {
1747 		printk(KERN_ERR PFX "%s post_recv failed ret %d\n", __func__, ret);
1748 		return;
1749 	}
1750 
1751 	/* poll until the flush WRs complete */
1752 	do {
1753 		ret = ib_poll_cq(cb->cq, 1, &wc);
1754 		if (ret < 0) {
1755 			printk(KERN_ERR PFX "ib_poll_cq failed %d\n", ret);
1756 			return;
1757 		}
1758 		if (ret == 0)
1759 			continue;
1760 		ccnt++;
1761 		if (wc.wr_id == 0xdeadbeefcafebabe ||
1762 		    wc.wr_id == 0xcafebabedeadbeef)
1763 			flushed++;
1764 	} while (flushed != 2);
1765 	DEBUG_LOG("qp_flushed! ccnt %u\n", ccnt);
1766 }
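/*
 * Note (illustrative): after rdma_disconnect() the QP is in the error
 * state, so both dummy WRs above complete with IB_WC_WR_FLUSH_ERR; the
 * loop counts those two flush completions (ccnt also picks up any other
 * still-pending WRs) before returning.
 */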
1767 
1768 static void krping_fr_test(struct krping_cb *cb)
1769 {
1770 	struct ib_send_wr inv, *bad;
1771 	struct ib_reg_wr fr;
1772 	struct ib_wc wc;
1773 	u8 key = 0;
1774 	struct ib_mr *mr;
1775 	int ret;
1776 	int size = cb->size;
1777 	int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
1778 	unsigned long start;
1779 	int count = 0;
1780 	int scnt = 0;
1781 	struct scatterlist sg = {0};
1782 
1783 	mr = ib_alloc_mr(cb->pd, IB_MR_TYPE_MEM_REG, plen);
1784 	if (IS_ERR(mr)) {
1785 		printk(KERN_ERR PFX "ib_alloc_mr failed %ld\n", PTR_ERR(mr));
1786 		return;
1787 	}
1788 
1789 	sg_dma_address(&sg) = (dma_addr_t)0xcafebabe0000ULL;
1790 	sg_dma_len(&sg) = size;
1791 	ret = ib_map_mr_sg(mr, &sg, 1, NULL, PAGE_SIZE);
1792 	if (ret <= 0) {
1793 		printk(KERN_ERR PFX "ib_map_mr_sg err %d\n", ret);
1794 		goto err2;
1795 	}
1796 
1797 	memset(&fr, 0, sizeof fr);
1798 	fr.wr.opcode = IB_WR_REG_MR;
1799 	fr.access = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
1800 	fr.mr = mr;
1801 	fr.wr.next = &inv;
1802 
1803 	memset(&inv, 0, sizeof inv);
1804 	inv.opcode = IB_WR_LOCAL_INV;
1805 	inv.send_flags = IB_SEND_SIGNALED;
1806 
1807 	DEBUG_LOG("fr_test: stag index 0x%x plen %u size %u depth %u\n", mr->rkey >> 8, plen, cb->size, cb->txdepth);
1808 	start = time_uptime;
1809 	while (!cb->count || count <= cb->count) {
1810 		if (SIGPENDING(curthread)) {
1811 			printk(KERN_ERR PFX "signal!\n");
1812 			break;
1813 		}
1814 		if ((time_uptime - start) >= 9) {
1815 			DEBUG_LOG("fr_test: pausing 1 second! count %u latest size %u plen %u\n", count, size, plen);
1816 			wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
1817 			if (cb->state == ERROR)
1818 				break;
1819 			start = time_uptime;
1820 		}
1821 		while (scnt < (cb->txdepth>>1)) {
1822 			ib_update_fast_reg_key(mr, ++key);
1823 			fr.key = mr->rkey;
1824 			inv.ex.invalidate_rkey = mr->rkey;
1825 
1826 			size = arc4random() % cb->size;
1827 			if (size == 0)
1828 				size = cb->size;
1829 			sg_dma_len(&sg) = size;
1830 			ret = ib_map_mr_sg(mr, &sg, 1, NULL, PAGE_SIZE);
1831 			if (ret <= 0) {
1832 				printk(KERN_ERR PFX "ib_map_mr_sg err %d\n", ret);
1833 				goto err2;
1834 			}
1835 			ret = ib_post_send(cb->qp, &fr.wr, &bad);
1836 			if (ret) {
1837 				printk(KERN_ERR PFX "ib_post_send failed %d\n", ret);
1838 				goto err2;
1839 			}
1840 			scnt++;
1841 		}
1842 
1843 		ret = ib_poll_cq(cb->cq, 1, &wc);
1844 		if (ret < 0) {
1845 			printk(KERN_ERR PFX "ib_poll_cq failed %d\n", ret);
1846 			goto err2;
1847 		}
1848 		if (ret == 1) {
1849 			if (wc.status) {
1850 				printk(KERN_ERR PFX "completion error %u\n", wc.status);
1851 				goto err2;
1852 			}
1853 			count++;
1854 			scnt--;
1855 		}
1856 	}
1857 err2:
1858 	flush_qp(cb);
1859 	DEBUG_LOG("fr_test: done!\n");
1860 	ib_dereg_mr(mr);
1861 }
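/*
 * Illustrative summary of the loop above: up to txdepth/2 REG_MR plus
 * signaled LOCAL_INV pairs are kept outstanding, each with a freshly
 * bumped key and a random length up to cb->size; one completion is
 * reaped per pass until count registrations have completed, pausing
 * briefly every few seconds.
 */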
1862 
1863 static int krping_connect_client(struct krping_cb *cb)
1864 {
1865 	struct rdma_conn_param conn_param;
1866 	int ret;
1867 
1868 	memset(&conn_param, 0, sizeof conn_param);
1869 	conn_param.responder_resources = 1;
1870 	conn_param.initiator_depth = 1;
1871 	conn_param.retry_count = 10;
1872 
1873 	ret = rdma_connect(cb->cm_id, &conn_param);
1874 	if (ret) {
1875 		printk(KERN_ERR PFX "rdma_connect error %d\n", ret);
1876 		return ret;
1877 	}
1878 
1879 	wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
1880 	if (cb->state == ERROR) {
1881 		printk(KERN_ERR PFX "wait for CONNECTED state %d\n", cb->state);
1882 		return -1;
1883 	}
1884 
1885 	DEBUG_LOG("rdma_connect successful\n");
1886 	return 0;
1887 }
1888 
1889 static int krping_bind_client(struct krping_cb *cb)
1890 {
1891 	struct sockaddr_storage sin;
1892 	int ret;
1893 
1894 	fill_sockaddr(&sin, cb);
1895 
1896 	ret = rdma_resolve_addr(cb->cm_id, NULL, (struct sockaddr *)&sin, 2000);
1897 	if (ret) {
1898 		printk(KERN_ERR PFX "rdma_resolve_addr error %d\n", ret);
1899 		return ret;
1900 	}
1901 
1902 	wait_event_interruptible(cb->sem, cb->state >= ROUTE_RESOLVED);
1903 	if (cb->state != ROUTE_RESOLVED) {
1904 		printk(KERN_ERR PFX
1905 		       "addr/route resolution did not resolve: state %d\n",
1906 		       cb->state);
1907 		return -EINTR;
1908 	}
1909 
1910 	if (!reg_supported(cb->cm_id->device))
1911 		return -EINVAL;
1912 
1913 	DEBUG_LOG("rdma_resolve_addr - rdma_resolve_route successful\n");
1914 	return 0;
1915 }
1916 
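/*
 * Client side of a krping run: bind to the server, create the QP and
 * buffers, pre-post a receive, connect, and then run whichever test was
 * selected (wlat, rlat, bw, frtest, or the default ping test) before
 * disconnecting and tearing everything down.
 */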
1917 static void krping_run_client(struct krping_cb *cb)
1918 {
1919 	struct ib_recv_wr *bad_wr;
1920 	int ret;
1921 
1922 	/* set type of service, if any */
1923 	if (cb->tos != 0)
1924 		rdma_set_service_type(cb->cm_id, cb->tos);
1925 
1926 	ret = krping_bind_client(cb);
1927 	if (ret)
1928 		return;
1929 
1930 	ret = krping_setup_qp(cb, cb->cm_id);
1931 	if (ret) {
1932 		printk(KERN_ERR PFX "setup_qp failed: %d\n", ret);
1933 		return;
1934 	}
1935 
1936 	ret = krping_setup_buffers(cb);
1937 	if (ret) {
1938 		printk(KERN_ERR PFX "krping_setup_buffers failed: %d\n", ret);
1939 		goto err1;
1940 	}
1941 
1942 	ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
1943 	if (ret) {
1944 		printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
1945 		goto err2;
1946 	}
1947 
1948 	ret = krping_connect_client(cb);
1949 	if (ret) {
1950 		printk(KERN_ERR PFX "connect error %d\n", ret);
1951 		goto err2;
1952 	}
1953 
1954 	if (cb->wlat)
1955 		krping_wlat_test_client(cb);
1956 	else if (cb->rlat)
1957 		krping_rlat_test_client(cb);
1958 	else if (cb->bw)
1959 		krping_bw_test_client(cb);
1960 	else if (cb->frtest)
1961 		krping_fr_test(cb);
1962 	else
1963 		krping_test_client(cb);
1964 	rdma_disconnect(cb->cm_id);
1965 err2:
1966 	krping_free_buffers(cb);
1967 err1:
1968 	krping_free_qp(cb);
1969 }
1970 
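/*
 * Map an interface name (the "%scope" suffix of a link-local IPv6
 * address) to its interface index, which is used as the scope ID.
 * Returns 0 if no name was given or the interface does not exist.
 */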
1971 static uint16_t
1972 krping_get_ipv6_scope_id(char *name)
1973 {
1974 	struct ifnet *ifp;
1975 	uint16_t retval;
1976 
1977 	if (name == NULL)
1978 		return (0);
1979 	CURVNET_SET_QUIET(TD_TO_VNET(curthread));
1980 	ifp = ifunit_ref(name);
1981 	CURVNET_RESTORE();
1982 	if (ifp == NULL)
1983 		return (0);
1984 	retval = ifp->if_index;
1985 	if_rele(ifp);
1986 	return (retval);
1987 }
1988 
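/*
 * Entry point for one krping run: parse the option string handed in from
 * userland (syntax as accepted by krping_getopt()), validate the option
 * combination, create the RDMA CM id, and run either the server or the
 * client side until the test completes or is cancelled.
 */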
1989 int krping_doit(char *cmd)
1990 {
1991 	struct krping_cb *cb;
1992 	int op;
1993 	int ret = 0;
1994 	char *optarg;
1995 	char *scope;
1996 	unsigned long optint;
1997 
1998 	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1999 	if (!cb)
2000 		return -ENOMEM;
2001 
2002 	mutex_lock(&krping_mutex);
2003 	list_add_tail(&cb->list, &krping_cbs);
2004 	mutex_unlock(&krping_mutex);
2005 
2006 	cb->server = -1;
2007 	cb->state = IDLE;
2008 	cb->size = 64;
2009 	cb->txdepth = RPING_SQ_DEPTH;
2010 	init_waitqueue_head(&cb->sem);
2011 
2012 	while ((op = krping_getopt("krping", &cmd, krping_opts, NULL, &optarg,
2013 			      &optint)) != 0) {
2014 		switch (op) {
2015 		case 'a':
2016 			cb->addr_str = optarg;
2017 			cb->addr_type = AF_INET;
2018 			DEBUG_LOG("ipaddr (%s)\n", optarg);
2019 			if (inet_pton(AF_INET, optarg, cb->addr) != 1) {
2020 				printk(KERN_ERR PFX "bad addr string %s\n",
2021 				    optarg);
2022 				ret = EINVAL;
2023 			}
2024 			break;
2025 		case 'A':
2026 			cb->addr_str = optarg;
2027 			cb->addr_type = AF_INET6;
2028 			DEBUG_LOG("ipv6addr (%s)\n", optarg);
2029 			scope = strstr(optarg, "%");
2030 			/* extract scope ID, if any */
2031 			if (scope != NULL)
2032 				*scope++ = 0;
2033 			/* extract IPv6 network address */
2034 			if (inet_pton(AF_INET6, optarg, cb->addr) != 1) {
2035 				printk(KERN_ERR PFX "bad addr string %s\n",
2036 				    optarg);
2037 				ret = EINVAL;
2038 			} else if (IN6_IS_SCOPE_LINKLOCAL((struct in6_addr *)cb->addr) ||
2039 			    IN6_IS_ADDR_MC_INTFACELOCAL((struct in6_addr *)cb->addr)) {
2040 				uint16_t scope_id = krping_get_ipv6_scope_id(scope);
2041 				DEBUG_LOG("ipv6 scope ID = %d\n", scope_id);
2042 				cb->addr[2] = scope_id >> 8;
2043 				cb->addr[3] = scope_id & 0xFF;
2044 			}
2045 			break;
2046 		case 'p':
2047 			cb->port = htons(optint);
2048 			DEBUG_LOG("port %d\n", (int)optint);
2049 			break;
2050 		case 'P':
2051 			cb->poll = 1;
2052 			DEBUG_LOG("poll\n");
2053 			break;
2054 		case 's':
2055 			cb->server = 1;
2056 			DEBUG_LOG("server\n");
2057 			break;
2058 		case 'c':
2059 			cb->server = 0;
2060 			DEBUG_LOG("client\n");
2061 			break;
2062 		case 'S':
2063 			cb->size = optint;
2064 			if ((cb->size < 1) ||
2065 			    (cb->size > RPING_BUFSIZE)) {
2066 				printk(KERN_ERR PFX "Invalid size %d "
2067 				       "(valid range is 1 to %d)\n",
2068 				       cb->size, RPING_BUFSIZE);
2069 				ret = EINVAL;
2070 			} else
2071 				DEBUG_LOG("size %d\n", (int)optint);
2072 			break;
2073 		case 'C':
2074 			cb->count = optint;
2075 			if (cb->count < 0) {
2076 				printk(KERN_ERR PFX "Invalid count %d\n",
2077 					cb->count);
2078 				ret = EINVAL;
2079 			} else
2080 				DEBUG_LOG("count %d\n", (int) cb->count);
2081 			break;
2082 		case 'v':
2083 			cb->verbose++;
2084 			DEBUG_LOG("verbose\n");
2085 			break;
2086 		case 'V':
2087 			cb->validate++;
2088 			DEBUG_LOG("validate data\n");
2089 			break;
2090 		case 'l':
2091 			cb->wlat++;
2092 			break;
2093 		case 'L':
2094 			cb->rlat++;
2095 			break;
2096 		case 'B':
2097 			cb->bw++;
2098 			break;
2099 		case 'd':
2100 			cb->duplex++;
2101 			break;
2102 		case 'I':
2103 			cb->server_invalidate = 1;
2104 			break;
2105 		case 't':
2106 			cb->tos = optint;
2107 			DEBUG_LOG("type of service, tos=%d\n", (int) cb->tos);
2108 			break;
2109 		case 'T':
2110 			cb->txdepth = optint;
2111 			DEBUG_LOG("txdepth %d\n", (int) cb->txdepth);
2112 			break;
2113 		case 'Z':
2114 			cb->local_dma_lkey = 1;
2115 			DEBUG_LOG("using local dma lkey\n");
2116 			break;
2117 		case 'R':
2118 			cb->read_inv = 1;
2119 			DEBUG_LOG("using read-with-inv\n");
2120 			break;
2121 		case 'f':
2122 			cb->frtest = 1;
2123 			DEBUG_LOG("fast-reg test!\n");
2124 			break;
2125 		default:
2126 			printk(KERN_ERR PFX "unknown opt %s\n", optarg);
2127 			ret = -EINVAL;
2128 			break;
2129 		}
2130 	}
2131 	if (ret)
2132 		goto out;
2133 
2134 	if (cb->server == -1) {
2135 		printk(KERN_ERR PFX "must be either client or server\n");
2136 		ret = -EINVAL;
2137 		goto out;
2138 	}
2139 
2140 	if (cb->server && cb->frtest) {
2141 		printk(KERN_ERR PFX "must be client to run frtest\n");
2142 		ret = -EINVAL;
2143 		goto out;
2144 	}
2145 
2146 	if ((cb->frtest + cb->bw + cb->rlat + cb->wlat) > 1) {
2147 		printk(KERN_ERR PFX "Pick only one test: fr, bw, rlat, wlat\n");
2148 		ret = -EINVAL;
2149 		goto out;
2150 	}
2151 
2152 	if (cb->wlat || cb->rlat || cb->bw) {
2153 		printk(KERN_ERR PFX "wlat, rlat, and bw tests only support mem_mode MR - which is no longer supported\n");
2154 		ret = -EINVAL;
2155 		goto out;
2156 	}
2157 
2158 	cb->cm_id = rdma_create_id(TD_TO_VNET(curthread), krping_cma_event_handler, cb, RDMA_PS_TCP, IB_QPT_RC);
2159 	if (IS_ERR(cb->cm_id)) {
2160 		ret = PTR_ERR(cb->cm_id);
2161 		printk(KERN_ERR PFX "rdma_create_id error %d\n", ret);
2162 		goto out;
2163 	}
2164 	DEBUG_LOG("created cm_id %p\n", cb->cm_id);
2165 
2166 	if (cb->server)
2167 		krping_run_server(cb);
2168 	else
2169 		krping_run_client(cb);
2170 
2171 	DEBUG_LOG("destroy cm_id %p\n", cb->cm_id);
2172 	rdma_destroy_id(cb->cm_id);
2173 out:
2174 	mutex_lock(&krping_mutex);
2175 	list_del(&cb->list);
2176 	mutex_unlock(&krping_mutex);
2177 	kfree(cb);
2178 	return ret;
2179 }
2180 
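/*
 * Invoke the callback for every control block on the global list while
 * holding krping_mutex, passing its stats (or NULL if the protection
 * domain has not been allocated yet).
 */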
2181 void
2182 krping_walk_cb_list(void (*f)(struct krping_stats *, void *), void *arg)
2183 {
2184 	struct krping_cb *cb;
2185 
2186 	mutex_lock(&krping_mutex);
2187 	list_for_each_entry(cb, &krping_cbs, list)
2188 	    (*f)(cb->pd ? &cb->stats : NULL, arg);
2189 	mutex_unlock(&krping_mutex);
2190 }
2191 
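/*
 * Force every outstanding krping run into the ERROR state and wake any
 * thread sleeping on its semaphore so that in-progress tests terminate.
 */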
2192 void
2193 krping_cancel_all(void)
2194 {
2195 	struct krping_cb *cb;
2196 
2197 	mutex_lock(&krping_mutex);
2198 	list_for_each_entry(cb, &krping_cbs, list) {
2199 		cb->state = ERROR;
2200 		wake_up_interruptible(&cb->sem);
2201 	}
2202 	mutex_unlock(&krping_mutex);
2203 }
2204 
2205