xref: /freebsd/sys/contrib/rdma/krping/krping.c (revision cddbc3b40812213ff00041f79174cac0be360a2a)
1 /*
2  * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3  * Copyright (c) 2006-2009 Open Grid Computing, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <linux/module.h>
38 #include <linux/moduleparam.h>
39 #include <linux/slab.h>
40 #include <linux/err.h>
41 #include <linux/string.h>
42 #include <linux/list.h>
43 #include <linux/in.h>
44 #include <linux/device.h>
45 #include <linux/pci.h>
46 #include <linux/sched.h>
47 #include <linux/wait.h>
48 
49 #include <asm/atomic.h>
50 
51 #include <rdma/ib_verbs.h>
52 #include <rdma/rdma_cm.h>
53 
54 #include "krping.h"
55 #include "getopt.h"
56 
57 #define PFX "krping: "
58 
59 extern int krping_debug;
60 #define DEBUG_LOG(...) do { if (krping_debug) log(LOG_INFO, __VA_ARGS__); } while (0)
61 #define BIND_INFO 1
62 
63 MODULE_AUTHOR("Steve Wise");
64 MODULE_DESCRIPTION("RDMA ping server");
65 MODULE_LICENSE("Dual BSD/GPL");
66 MODULE_VERSION(krping, 1);
67 MODULE_DEPEND(krping, linuxkpi, 1, 1, 1);
68 
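/*
 * Read the x86 time-stamp counter.  The wlat/bw tests use these raw
 * cycle counts for per-post and per-poll timing.
 */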
69 static __inline uint64_t
70 get_cycles(void)
71 {
72 	uint32_t low, high;
73 	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
74 	return (low | ((uint64_t)high << 32));
75 }
76 
77 typedef uint64_t cycles_t;
78 
79 enum mem_type {
80 	DMA = 1,
81 	REG = 2,
82 };
83 
84 static const struct krping_option krping_opts[] = {
85 	{"count", OPT_INT, 'C'},
86 	{"size", OPT_INT, 'S'},
87 	{"addr", OPT_STRING, 'a'},
88 	{"addr6", OPT_STRING, 'A'},
89 	{"port", OPT_INT, 'p'},
90 	{"verbose", OPT_NOPARAM, 'v'},
91 	{"validate", OPT_NOPARAM, 'V'},
92 	{"server", OPT_NOPARAM, 's'},
93 	{"client", OPT_NOPARAM, 'c'},
94 	{"server_inv", OPT_NOPARAM, 'I'},
95 	{"wlat", OPT_NOPARAM, 'l'},
96 	{"rlat", OPT_NOPARAM, 'L'},
97 	{"bw", OPT_NOPARAM, 'B'},
98 	{"duplex", OPT_NOPARAM, 'd'},
99 	{"tos", OPT_INT, 't'},
100 	{"txdepth", OPT_INT, 'T'},
101 	{"poll", OPT_NOPARAM, 'P'},
102 	{"local_dma_lkey", OPT_NOPARAM, 'Z'},
103 	{"read_inv", OPT_NOPARAM, 'R'},
104 	{"fr", OPT_NOPARAM, 'f'},
105 	{NULL, 0, 0}
106 };
107 
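/*
 * 64-bit byte-order helpers for the advertised buffer address.  Both
 * map to cpu_to_be64(); the 64-bit byte swap is symmetric, so the same
 * conversion works in either direction.
 */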
108 #define htonll(x) cpu_to_be64((x))
109 #define ntohll(x) cpu_to_be64((x))
110 
111 static DEFINE_MUTEX(krping_mutex);
112 
113 /*
114  * List of running krping threads.
115  */
116 static LIST_HEAD(krping_cbs);
117 
118 /*
119  * Invoke like this, one on each side, using the server's address on
120  * the RDMA device (iw%d):
121  *
122  * /bin/echo server,port=9999,addr=192.168.69.142,validate > /proc/krping
123  * /bin/echo client,port=9999,addr=192.168.69.142,validate > /proc/krping
124  * /bin/echo client,port=9999,addr6=2001:db8:0:f101::1,validate > /proc/krping
125  *
126  * krping "ping/pong" loop:
127  * 	client sends source rkey/addr/len
128  *	server receives source rkey/addr/len
129  *	server rdma reads "ping" data from source
130  * 	server sends "go ahead" on rdma read completion
131  *	client sends sink rkey/addr/len
132  * 	server receives sink rkey/addr/len
133  * 	server rdma writes "pong" data to sink
134  * 	server sends "go ahead" on rdma write completion
135  * 	<repeat loop>
136  */
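
/*
 * The perf tests use the same option syntax.  An illustrative example
 * for the read-latency test:
 *
 * /bin/echo server,port=9999,addr=192.168.69.142,rlat > /proc/krping
 * /bin/echo client,port=9999,addr=192.168.69.142,rlat > /proc/krping
 */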
137 
138 /*
139  * These states are used to signal events between the completion handler
140  * and the main client or server thread.
141  *
142  * Once CONNECTED, they cycle through RDMA_READ_ADV, RDMA_WRITE_ADV,
143  * and RDMA_WRITE_COMPLETE for each ping.
144  */
145 enum test_state {
146 	IDLE = 1,
147 	CONNECT_REQUEST,
148 	ADDR_RESOLVED,
149 	ROUTE_RESOLVED,
150 	CONNECTED,
151 	RDMA_READ_ADV,
152 	RDMA_READ_COMPLETE,
153 	RDMA_WRITE_ADV,
154 	RDMA_WRITE_COMPLETE,
155 	ERROR
156 };
157 
158 struct krping_rdma_info {
159 	uint64_t buf;
160 	uint32_t rkey;
161 	uint32_t size;
162 };
163 
164 /*
165  * Default max buffer size for IO...
166  */
167 #define RPING_BUFSIZE (128 * 1024)
168 #define RPING_SQ_DEPTH 64
169 
170 /*
171  * Control block struct.
172  */
173 struct krping_cb {
174 	int server;			/* 0 iff client */
175 	struct ib_cq *cq;
176 	struct ib_pd *pd;
177 	struct ib_qp *qp;
178 
179 	struct ib_mr *dma_mr;
180 
181 	struct ib_fast_reg_page_list *page_list;
182 	int page_list_len;
183 	struct ib_reg_wr reg_mr_wr;
184 	struct ib_send_wr invalidate_wr;
185 	struct ib_mr *reg_mr;
186 	int server_invalidate;
187 	int read_inv;
188 	u8 key;
189 
190 	struct ib_recv_wr rq_wr;	/* recv work request record */
191 	struct ib_sge recv_sgl;		/* recv single SGE */
192 	struct krping_rdma_info recv_buf __aligned(16);	/* DMA mapped recv buffer */
193 	u64 recv_dma_addr;
194 	DECLARE_PCI_UNMAP_ADDR(recv_mapping)
195 
196 	struct ib_send_wr sq_wr;	/* send work request record */
197 	struct ib_sge send_sgl;
198 	struct krping_rdma_info send_buf __aligned(16); /* single send buf */
199 	u64 send_dma_addr;
200 	DECLARE_PCI_UNMAP_ADDR(send_mapping)
201 
202 	struct ib_rdma_wr rdma_sq_wr;	/* rdma work request record */
203 	struct ib_sge rdma_sgl;		/* rdma single SGE */
204 	char *rdma_buf;			/* used as rdma sink */
205 	u64  rdma_dma_addr;
206 	DECLARE_PCI_UNMAP_ADDR(rdma_mapping)
207 	struct ib_mr *rdma_mr;
208 
209 	uint32_t remote_rkey;		/* remote peer's RKEY */
210 	uint64_t remote_addr;		/* remote peer's TO */
211 	uint32_t remote_len;		/* remote peer's LEN */
212 
213 	char *start_buf;		/* rdma read src */
214 	u64  start_dma_addr;
215 	DECLARE_PCI_UNMAP_ADDR(start_mapping)
216 	struct ib_mr *start_mr;
217 
218 	enum test_state state;		/* used for cond/signalling */
219 	wait_queue_head_t sem;
220 	struct krping_stats stats;
221 
222 	uint16_t port;			/* dst port in NBO */
223 	u8 addr[16] __aligned(8);	/* dst addr in NBO */
224 	char *addr_str;			/* dst addr string */
225 	uint8_t addr_type;		/* ADDR_FAMILY - IPv4/V6 */
226 	int verbose;			/* verbose logging */
227 	int count;			/* ping count */
228 	int size;			/* ping data size */
229 	int validate;			/* validate ping data */
230 	int wlat;			/* run wlat test */
231 	int rlat;			/* run rlat test */
232 	int bw;				/* run bw test */
233 	int duplex;			/* run bw full duplex test */
234 	int poll;			/* poll or block for rlat test */
235 	int txdepth;			/* SQ depth */
236 	int local_dma_lkey;		/* use 0 for lkey */
237 	int frtest;			/* reg test */
238 	int tos;			/* type of service */
239 
240 	/* CM stuff */
241 	struct rdma_cm_id *cm_id;	/* connection on client side,*/
242 					/* listener on server side. */
243 	struct rdma_cm_id *child_cm_id;	/* connection on server side */
244 	struct list_head list;
245 };
246 
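/*
 * Connection manager event callback: record the new connection state
 * in the control block and wake any thread sleeping on cb->sem.
 */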
247 static int krping_cma_event_handler(struct rdma_cm_id *cma_id,
248 				   struct rdma_cm_event *event)
249 {
250 	int ret;
251 	struct krping_cb *cb = cma_id->context;
252 
253 	DEBUG_LOG("cma_event type %d cma_id %p (%s)\n", event->event, cma_id,
254 		  (cma_id == cb->cm_id) ? "parent" : "child");
255 
256 	switch (event->event) {
257 	case RDMA_CM_EVENT_ADDR_RESOLVED:
258 		cb->state = ADDR_RESOLVED;
259 		ret = rdma_resolve_route(cma_id, 2000);
260 		if (ret) {
261 			printk(KERN_ERR PFX "rdma_resolve_route error %d\n",
262 			       ret);
263 			wake_up_interruptible(&cb->sem);
264 		}
265 		break;
266 
267 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
268 		cb->state = ROUTE_RESOLVED;
269 		wake_up_interruptible(&cb->sem);
270 		break;
271 
272 	case RDMA_CM_EVENT_CONNECT_REQUEST:
273 		cb->state = CONNECT_REQUEST;
274 		cb->child_cm_id = cma_id;
275 		DEBUG_LOG("child cma %p\n", cb->child_cm_id);
276 		wake_up_interruptible(&cb->sem);
277 		break;
278 
279 	case RDMA_CM_EVENT_ESTABLISHED:
280 		DEBUG_LOG("ESTABLISHED\n");
281 		if (!cb->server) {
282 			cb->state = CONNECTED;
283 		}
284 		wake_up_interruptible(&cb->sem);
285 		break;
286 
287 	case RDMA_CM_EVENT_ADDR_ERROR:
288 	case RDMA_CM_EVENT_ROUTE_ERROR:
289 	case RDMA_CM_EVENT_CONNECT_ERROR:
290 	case RDMA_CM_EVENT_UNREACHABLE:
291 	case RDMA_CM_EVENT_REJECTED:
292 		printk(KERN_ERR PFX "cma event %d, error %d\n", event->event,
293 		       event->status);
294 		cb->state = ERROR;
295 		wake_up_interruptible(&cb->sem);
296 		break;
297 
298 	case RDMA_CM_EVENT_DISCONNECTED:
299 		printk(KERN_ERR PFX "DISCONNECT EVENT...\n");
300 		cb->state = ERROR;
301 		wake_up_interruptible(&cb->sem);
302 		break;
303 
304 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
305 		printk(KERN_ERR PFX "cma detected device removal!!!!\n");
306 		cb->state = ERROR;
307 		wake_up_interruptible(&cb->sem);
308 		break;
309 
310 	default:
311 		printk(KERN_ERR PFX "unexpected cma event %d!\n", event->event);
312 		wake_up_interruptible(&cb->sem);
313 		break;
314 	}
315 	return 0;
316 }
317 
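/*
 * Handle a received rkey/addr/len advertisement: save the peer's info
 * and advance the state machine.
 */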
318 static int server_recv(struct krping_cb *cb, struct ib_wc *wc)
319 {
320 	if (wc->byte_len != sizeof(cb->recv_buf)) {
321 		printk(KERN_ERR PFX "Received bogus data, size %d\n",
322 		       wc->byte_len);
323 		return -1;
324 	}
325 
326 	cb->remote_rkey = ntohl(cb->recv_buf.rkey);
327 	cb->remote_addr = ntohll(cb->recv_buf.buf);
328 	cb->remote_len  = ntohl(cb->recv_buf.size);
329 	DEBUG_LOG("Received rkey %x addr %llx len %d from peer\n",
330 		  cb->remote_rkey, (unsigned long long)cb->remote_addr,
331 		  cb->remote_len);
332 
333 	if (cb->state <= CONNECTED || cb->state == RDMA_WRITE_COMPLETE)
334 		cb->state = RDMA_READ_ADV;
335 	else
336 		cb->state = RDMA_WRITE_ADV;
337 
338 	return 0;
339 }
340 
341 static int client_recv(struct krping_cb *cb, struct ib_wc *wc)
342 {
343 	if (wc->byte_len != sizeof(cb->recv_buf)) {
344 		printk(KERN_ERR PFX "Received bogus data, size %d\n",
345 		       wc->byte_len);
346 		return -1;
347 	}
348 
349 	if (cb->state == RDMA_READ_ADV)
350 		cb->state = RDMA_WRITE_ADV;
351 	else
352 		cb->state = RDMA_WRITE_COMPLETE;
353 
354 	return 0;
355 }
356 
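/*
 * Completion handler: drain the CQ, update the per-test statistics,
 * advance the state machine, and repost the receive WR after every
 * RECV completion.  Notifications are re-armed unless a polled perf
 * test is running.
 */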
357 static void krping_cq_event_handler(struct ib_cq *cq, void *ctx)
358 {
359 	struct krping_cb *cb = ctx;
360 	struct ib_wc wc;
361 	struct ib_recv_wr *bad_wr;
362 	int ret;
363 
364 	BUG_ON(cb->cq != cq);
365 	if (cb->state == ERROR) {
366 		printk(KERN_ERR PFX "cq completion in ERROR state\n");
367 		return;
368 	}
369 	if (cb->frtest) {
370 		printk(KERN_ERR PFX "cq completion event in frtest!\n");
371 		return;
372 	}
373 	if (!cb->wlat && !cb->rlat && !cb->bw)
374 		ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
375 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 1) {
376 		if (wc.status) {
377 			if (wc.status == IB_WC_WR_FLUSH_ERR) {
378 				DEBUG_LOG("cq flushed\n");
379 				continue;
380 			} else {
381 				printk(KERN_ERR PFX "cq completion failed with "
382 				       "wr_id %jx status %d opcode %d vender_err %x\n",
383 					(uintmax_t)wc.wr_id, wc.status, wc.opcode, wc.vendor_err);
384 				goto error;
385 			}
386 		}
387 
388 		switch (wc.opcode) {
389 		case IB_WC_SEND:
390 			DEBUG_LOG("send completion\n");
391 			cb->stats.send_bytes += cb->send_sgl.length;
392 			cb->stats.send_msgs++;
393 			break;
394 
395 		case IB_WC_RDMA_WRITE:
396 			DEBUG_LOG("rdma write completion\n");
397 			cb->stats.write_bytes += cb->rdma_sq_wr.wr.sg_list->length;
398 			cb->stats.write_msgs++;
399 			cb->state = RDMA_WRITE_COMPLETE;
400 			wake_up_interruptible(&cb->sem);
401 			break;
402 
403 		case IB_WC_RDMA_READ:
404 			DEBUG_LOG("rdma read completion\n");
405 			cb->stats.read_bytes += cb->rdma_sq_wr.wr.sg_list->length;
406 			cb->stats.read_msgs++;
407 			cb->state = RDMA_READ_COMPLETE;
408 			wake_up_interruptible(&cb->sem);
409 			break;
410 
411 		case IB_WC_RECV:
412 			DEBUG_LOG("recv completion\n");
413 			cb->stats.recv_bytes += sizeof(cb->recv_buf);
414 			cb->stats.recv_msgs++;
415 			if (cb->wlat || cb->rlat || cb->bw)
416 				ret = server_recv(cb, &wc);
417 			else
418 				ret = cb->server ? server_recv(cb, &wc) :
419 						   client_recv(cb, &wc);
420 			if (ret) {
421 				printk(KERN_ERR PFX "recv wc error: %d\n", ret);
422 				goto error;
423 			}
424 
425 			ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
426 			if (ret) {
427 				printk(KERN_ERR PFX "post recv error: %d\n",
428 				       ret);
429 				goto error;
430 			}
431 			wake_up_interruptible(&cb->sem);
432 			break;
433 
434 		default:
435 			printk(KERN_ERR PFX
436 			       "%s:%d Unexpected opcode %d, Shutting down\n",
437 			       __func__, __LINE__, wc.opcode);
438 			goto error;
439 		}
440 	}
441 	if (ret) {
442 		printk(KERN_ERR PFX "poll error %d\n", ret);
443 		goto error;
444 	}
445 	return;
446 error:
447 	cb->state = ERROR;
448 	wake_up_interruptible(&cb->sem);
449 }
450 
451 static int krping_accept(struct krping_cb *cb)
452 {
453 	struct rdma_conn_param conn_param;
454 	int ret;
455 
456 	DEBUG_LOG("accepting client connection request\n");
457 
458 	memset(&conn_param, 0, sizeof conn_param);
459 	conn_param.responder_resources = 1;
460 	conn_param.initiator_depth = 1;
461 
462 	ret = rdma_accept(cb->child_cm_id, &conn_param);
463 	if (ret) {
464 		printk(KERN_ERR PFX "rdma_accept error: %d\n", ret);
465 		return ret;
466 	}
467 
468 	if (!cb->wlat && !cb->rlat && !cb->bw) {
469 		wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
470 		if (cb->state == ERROR) {
471 			printk(KERN_ERR PFX "wait for CONNECTED state %d\n",
472 				cb->state);
473 			return -1;
474 		}
475 	}
476 	return 0;
477 }
478 
479 static void krping_setup_wr(struct krping_cb *cb)
480 {
481 	cb->recv_sgl.addr = cb->recv_dma_addr;
482 	cb->recv_sgl.length = sizeof cb->recv_buf;
483 	cb->recv_sgl.lkey = cb->pd->local_dma_lkey;
484 	cb->rq_wr.sg_list = &cb->recv_sgl;
485 	cb->rq_wr.num_sge = 1;
486 
487 	cb->send_sgl.addr = cb->send_dma_addr;
488 	cb->send_sgl.length = sizeof cb->send_buf;
489 	cb->send_sgl.lkey = cb->pd->local_dma_lkey;
490 
491 	cb->sq_wr.opcode = IB_WR_SEND;
492 	cb->sq_wr.send_flags = IB_SEND_SIGNALED;
493 	cb->sq_wr.sg_list = &cb->send_sgl;
494 	cb->sq_wr.num_sge = 1;
495 
496 	if (cb->server || cb->wlat || cb->rlat || cb->bw) {
497 		cb->rdma_sgl.addr = cb->rdma_dma_addr;
498 		cb->rdma_sq_wr.wr.send_flags = IB_SEND_SIGNALED;
499 		cb->rdma_sq_wr.wr.sg_list = &cb->rdma_sgl;
500 		cb->rdma_sq_wr.wr.num_sge = 1;
501 	}
502 
503 	/*
504 	 * A chain of 2 WRs, LOCAL_INV + REG_MR,
505 	 * both unsignaled.  The client uses them to reregister
506 	 * the rdma buffers with a new key each iteration.
507 	 */
508 	cb->reg_mr_wr.wr.opcode = IB_WR_REG_MR;
509 	cb->reg_mr_wr.mr = cb->reg_mr;
510 
511 	cb->invalidate_wr.next = &cb->reg_mr_wr.wr;
512 	cb->invalidate_wr.opcode = IB_WR_LOCAL_INV;
513 }
514 
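/*
 * DMA-map the send/recv control messages, allocate the coherent rdma
 * buffer (plus the start buffer for clients and perf tests), and
 * allocate the fast-registration MR.
 */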
515 static int krping_setup_buffers(struct krping_cb *cb)
516 {
517 	int ret;
518 
519 	DEBUG_LOG(PFX "krping_setup_buffers called on cb %p\n", cb);
520 
521 	cb->recv_dma_addr = ib_dma_map_single(cb->pd->device,
522 				   &cb->recv_buf,
523 				   sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
524 	pci_unmap_addr_set(cb, recv_mapping, cb->recv_dma_addr);
525 	cb->send_dma_addr = ib_dma_map_single(cb->pd->device,
526 					   &cb->send_buf, sizeof(cb->send_buf),
527 					   DMA_BIDIRECTIONAL);
528 	pci_unmap_addr_set(cb, send_mapping, cb->send_dma_addr);
529 
530 	cb->rdma_buf = ib_dma_alloc_coherent(cb->pd->device, cb->size,
531 					     &cb->rdma_dma_addr,
532 					     GFP_KERNEL);
533 	if (!cb->rdma_buf) {
534 		DEBUG_LOG(PFX "rdma_buf allocation failed\n");
535 		ret = -ENOMEM;
536 		goto bail;
537 	}
538 	pci_unmap_addr_set(cb, rdma_mapping, cb->rdma_dma_addr);
539 	cb->page_list_len = (((cb->size - 1) & PAGE_MASK) + PAGE_SIZE)
540 				>> PAGE_SHIFT;
541 	cb->reg_mr = ib_alloc_mr(cb->pd,  IB_MR_TYPE_MEM_REG,
542 				 cb->page_list_len);
543 	if (IS_ERR(cb->reg_mr)) {
544 		ret = PTR_ERR(cb->reg_mr);
545 		DEBUG_LOG(PFX "reg_mr allocation failed %d\n", ret);
546 		goto bail;
547 	}
548 	DEBUG_LOG(PFX "reg rkey 0x%x page_list_len %u\n",
549 		cb->reg_mr->rkey, cb->page_list_len);
550 
551 	if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
552 
553 		cb->start_buf = ib_dma_alloc_coherent(cb->pd->device, cb->size,
554 						      &cb->start_dma_addr,
555 						      GFP_KERNEL);
556 		if (!cb->start_buf) {
557 			DEBUG_LOG(PFX "start_buf allocation failed\n");
558 			ret = -ENOMEM;
559 			goto bail;
560 		}
561 		pci_unmap_addr_set(cb, start_mapping, cb->start_dma_addr);
562 	}
563 
564 	krping_setup_wr(cb);
565 	DEBUG_LOG(PFX "allocated & registered buffers...\n");
566 	return 0;
567 bail:
568 	if (cb->reg_mr && !IS_ERR(cb->reg_mr))
569 		ib_dereg_mr(cb->reg_mr);
570 	if (cb->rdma_mr && !IS_ERR(cb->rdma_mr))
571 		ib_dereg_mr(cb->rdma_mr);
572 	if (cb->dma_mr && !IS_ERR(cb->dma_mr))
573 		ib_dereg_mr(cb->dma_mr);
574 	if (cb->rdma_buf) {
575 		ib_dma_free_coherent(cb->pd->device, cb->size, cb->rdma_buf,
576 				     cb->rdma_dma_addr);
577 	}
578 	if (cb->start_buf) {
579 		ib_dma_free_coherent(cb->pd->device, cb->size, cb->start_buf,
580 				     cb->start_dma_addr);
581 	}
582 	return ret;
583 }
584 
585 static void krping_free_buffers(struct krping_cb *cb)
586 {
587 	DEBUG_LOG("krping_free_buffers called on cb %p\n", cb);
588 
589 	if (cb->dma_mr)
590 		ib_dereg_mr(cb->dma_mr);
591 	if (cb->rdma_mr)
592 		ib_dereg_mr(cb->rdma_mr);
593 	if (cb->start_mr)
594 		ib_dereg_mr(cb->start_mr);
595 	if (cb->reg_mr)
596 		ib_dereg_mr(cb->reg_mr);
597 
598 	dma_unmap_single(cb->pd->device->dma_device,
599 			 pci_unmap_addr(cb, recv_mapping),
600 			 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
601 	dma_unmap_single(cb->pd->device->dma_device,
602 			 pci_unmap_addr(cb, send_mapping),
603 			 sizeof(cb->send_buf), DMA_BIDIRECTIONAL);
604 
605 	ib_dma_free_coherent(cb->pd->device, cb->size, cb->rdma_buf,
606 			     cb->rdma_dma_addr);
607 
608 	if (cb->start_buf) {
609 		ib_dma_free_coherent(cb->pd->device, cb->size, cb->start_buf,
610 				     cb->start_dma_addr);
611 	}
612 }
613 
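/*
 * Create the RC QP on the connected cm_id (the child id on the server
 * side).  One extra WR is reserved on each queue for flush_qp().
 */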
614 static int krping_create_qp(struct krping_cb *cb)
615 {
616 	struct ib_qp_init_attr init_attr;
617 	int ret;
618 
619 	memset(&init_attr, 0, sizeof(init_attr));
620 	init_attr.cap.max_send_wr = cb->txdepth;
621 	init_attr.cap.max_recv_wr = 2;
622 
623 	/* For flush_qp() */
624 	init_attr.cap.max_send_wr++;
625 	init_attr.cap.max_recv_wr++;
626 
627 	init_attr.cap.max_recv_sge = 1;
628 	init_attr.cap.max_send_sge = 1;
629 	init_attr.qp_type = IB_QPT_RC;
630 	init_attr.send_cq = cb->cq;
631 	init_attr.recv_cq = cb->cq;
632 	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
633 
634 	if (cb->server) {
635 		ret = rdma_create_qp(cb->child_cm_id, cb->pd, &init_attr);
636 		if (!ret)
637 			cb->qp = cb->child_cm_id->qp;
638 	} else {
639 		ret = rdma_create_qp(cb->cm_id, cb->pd, &init_attr);
640 		if (!ret)
641 			cb->qp = cb->cm_id->qp;
642 	}
643 
644 	return ret;
645 }
646 
647 static void krping_free_qp(struct krping_cb *cb)
648 {
649 	ib_destroy_qp(cb->qp);
650 	ib_destroy_cq(cb->cq);
651 	ib_dealloc_pd(cb->pd);
652 }
653 
654 static int krping_setup_qp(struct krping_cb *cb, struct rdma_cm_id *cm_id)
655 {
656 	int ret;
657 	struct ib_cq_init_attr attr = {0};
658 
659 	cb->pd = ib_alloc_pd(cm_id->device, 0);
660 	if (IS_ERR(cb->pd)) {
661 		printk(KERN_ERR PFX "ib_alloc_pd failed\n");
662 		return PTR_ERR(cb->pd);
663 	}
664 	DEBUG_LOG("created pd %p\n", cb->pd);
665 
666 	strlcpy(cb->stats.name, cb->pd->device->name, sizeof(cb->stats.name));
667 
668 	attr.cqe = cb->txdepth * 2;
669 	attr.comp_vector = 0;
670 	cb->cq = ib_create_cq(cm_id->device, krping_cq_event_handler, NULL,
671 			      cb, &attr);
672 	if (IS_ERR(cb->cq)) {
673 		printk(KERN_ERR PFX "ib_create_cq failed\n");
674 		ret = PTR_ERR(cb->cq);
675 		goto err1;
676 	}
677 	DEBUG_LOG("created cq %p\n", cb->cq);
678 
679 	if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest) {
680 		ret = ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
681 		if (ret) {
682 			printk(KERN_ERR PFX "ib_req_notify_cq failed\n");
683 			goto err2;
684 		}
685 	}
686 
687 	ret = krping_create_qp(cb);
688 	if (ret) {
689 		printk(KERN_ERR PFX "krping_create_qp failed: %d\n", ret);
690 		goto err2;
691 	}
692 	DEBUG_LOG("created qp %p\n", cb->qp);
693 	return 0;
694 err2:
695 	ib_destroy_cq(cb->cq);
696 err1:
697 	ib_dealloc_pd(cb->pd);
698 	return ret;
699 }
700 
701 /*
702  * Return the (possibly rebound) rkey for the rdma buffer.
703  * REG mode: invalidate the MR and rebind it via a REG_MR work request.
704  * Other modes: just return the MR's rkey.
705  */
706 static u32 krping_rdma_rkey(struct krping_cb *cb, u64 buf, int post_inv)
707 {
708 	u32 rkey;
709 	struct ib_send_wr *bad_wr;
710 	int ret;
711 	struct scatterlist sg = {0};
712 
713 	cb->invalidate_wr.ex.invalidate_rkey = cb->reg_mr->rkey;
714 
715 	/*
716 	 * Update the reg key.
717 	 */
718 	ib_update_fast_reg_key(cb->reg_mr, ++cb->key);
719 	cb->reg_mr_wr.key = cb->reg_mr->rkey;
720 
721 	/*
722 	 * Update the reg WR with new buf info.
723 	 */
724 	if (buf == (u64)cb->start_dma_addr)
725 		cb->reg_mr_wr.access = IB_ACCESS_REMOTE_READ;
726 	else
727 		cb->reg_mr_wr.access = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
728 	sg_dma_address(&sg) = buf;
729 	sg_dma_len(&sg) = cb->size;
730 
731 	ret = ib_map_mr_sg(cb->reg_mr, &sg, 1, NULL, PAGE_SIZE);
732 	BUG_ON(ret <= 0 || ret > cb->page_list_len);
733 
734 	DEBUG_LOG(PFX "post_inv = %d, reg_mr new rkey 0x%x pgsz %u len %u"
735 		" iova_start %llx\n",
736 		post_inv,
737 		cb->reg_mr_wr.key,
738 		cb->reg_mr->page_size,
739 		cb->reg_mr->length,
740 	        (unsigned long long)cb->reg_mr->iova);
741 
742 	if (post_inv)
743 		ret = ib_post_send(cb->qp, &cb->invalidate_wr, &bad_wr);
744 	else
745 		ret = ib_post_send(cb->qp, &cb->reg_mr_wr.wr, &bad_wr);
746 	if (ret) {
747 		printk(KERN_ERR PFX "post send error %d\n", ret);
748 		cb->state = ERROR;
749 	}
750 	rkey = cb->reg_mr->rkey;
751 	return rkey;
752 }
753 
754 static void krping_format_send(struct krping_cb *cb, u64 buf)
755 {
756 	struct krping_rdma_info *info = &cb->send_buf;
757 	u32 rkey;
758 
759 	/*
760 	 * Client side will do a fast registration before
761 	 * advertising the rdma buffer.  Server side
762 	 * sends have no data.
763 	 */
764 	if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
765 		rkey = krping_rdma_rkey(cb, buf, !cb->server_invalidate);
766 		info->buf = htonll(buf);
767 		info->rkey = htonl(rkey);
768 		info->size = htonl(cb->size);
769 		DEBUG_LOG("RDMA addr %llx rkey %x len %d\n",
770 			  (unsigned long long)buf, rkey, cb->size);
771 	}
772 }
773 
774 static void krping_test_server(struct krping_cb *cb)
775 {
776 	struct ib_send_wr *bad_wr, inv;
777 	int ret;
778 
779 	while (1) {
780 		/* Wait for client's Start STAG/TO/Len */
781 		wait_event_interruptible(cb->sem, cb->state >= RDMA_READ_ADV);
782 		if (cb->state != RDMA_READ_ADV) {
783 			printk(KERN_ERR PFX "wait for RDMA_READ_ADV state %d\n",
784 				cb->state);
785 			break;
786 		}
787 
788 		DEBUG_LOG("server received sink adv\n");
789 
790 		cb->rdma_sq_wr.rkey = cb->remote_rkey;
791 		cb->rdma_sq_wr.remote_addr = cb->remote_addr;
792 		cb->rdma_sq_wr.wr.sg_list->length = cb->remote_len;
793 		cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, !cb->read_inv);
794 		cb->rdma_sq_wr.wr.next = NULL;
795 
796 		/* Issue RDMA Read. */
797 		if (cb->read_inv)
798 			cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
799 		else {
800 
801 			cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ;
802 			/*
803 			 * Immediately follow the read with a
804 			 * fenced LOCAL_INV.
805 			 */
806 			cb->rdma_sq_wr.wr.next = &inv;
807 			memset(&inv, 0, sizeof inv);
808 			inv.opcode = IB_WR_LOCAL_INV;
809 			inv.ex.invalidate_rkey = cb->reg_mr->rkey;
810 			inv.send_flags = IB_SEND_FENCE;
811 		}
812 
813 		ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
814 		if (ret) {
815 			printk(KERN_ERR PFX "post send error %d\n", ret);
816 			break;
817 		}
818 		cb->rdma_sq_wr.wr.next = NULL;
819 
820 		DEBUG_LOG("server posted rdma read req\n");
821 
822 		/* Wait for read completion */
823 		wait_event_interruptible(cb->sem,
824 					 cb->state >= RDMA_READ_COMPLETE);
825 		if (cb->state != RDMA_READ_COMPLETE) {
826 			printk(KERN_ERR PFX
827 			       "wait for RDMA_READ_COMPLETE state %d\n",
828 			       cb->state);
829 			break;
830 		}
831 		DEBUG_LOG("server received read complete\n");
832 
833 		/* Display data in recv buf */
834 		if (cb->verbose)
835 			printk(KERN_INFO PFX "server ping data: %s\n",
836 				cb->rdma_buf);
837 
838 		/* Tell client to continue */
839 		if (cb->server && cb->server_invalidate) {
840 			cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
841 			cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
842 			DEBUG_LOG("send-w-inv rkey 0x%x\n", cb->remote_rkey);
843 		}
844 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
845 		if (ret) {
846 			printk(KERN_ERR PFX "post send error %d\n", ret);
847 			break;
848 		}
849 		DEBUG_LOG("server posted go ahead\n");
850 
851 		/* Wait for client's RDMA STAG/TO/Len */
852 		wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
853 		if (cb->state != RDMA_WRITE_ADV) {
854 			printk(KERN_ERR PFX
855 			       "wait for RDMA_WRITE_ADV state %d\n",
856 			       cb->state);
857 			break;
858 		}
859 		DEBUG_LOG("server received sink adv\n");
860 
861 		/* RDMA Write echo data */
862 		cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
863 		cb->rdma_sq_wr.rkey = cb->remote_rkey;
864 		cb->rdma_sq_wr.remote_addr = cb->remote_addr;
865 		cb->rdma_sq_wr.wr.sg_list->length = strlen(cb->rdma_buf) + 1;
866 		if (cb->local_dma_lkey)
867 			cb->rdma_sgl.lkey = cb->pd->local_dma_lkey;
868 		else
869 			cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, 0);
870 
871 		DEBUG_LOG("rdma write from lkey %x laddr %llx len %d\n",
872 			  cb->rdma_sq_wr.wr.sg_list->lkey,
873 			  (unsigned long long)cb->rdma_sq_wr.wr.sg_list->addr,
874 			  cb->rdma_sq_wr.wr.sg_list->length);
875 
876 		ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
877 		if (ret) {
878 			printk(KERN_ERR PFX "post send error %d\n", ret);
879 			break;
880 		}
881 
882 		/* Wait for completion */
883 		ret = wait_event_interruptible(cb->sem, cb->state >=
884 							 RDMA_WRITE_COMPLETE);
885 		if (cb->state != RDMA_WRITE_COMPLETE) {
886 			printk(KERN_ERR PFX
887 			       "wait for RDMA_WRITE_COMPLETE state %d\n",
888 			       cb->state);
889 			break;
890 		}
891 		DEBUG_LOG("server rdma write complete\n");
892 
893 		cb->state = CONNECTED;
894 
895 		/* Tell client to begin again */
896 		if (cb->server && cb->server_invalidate) {
897 			cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
898 			cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
899 			DEBUG_LOG("send-w-inv rkey 0x%x\n", cb->remote_rkey);
900 		}
901 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
902 		if (ret) {
903 			printk(KERN_ERR PFX "post send error %d\n", ret);
904 			break;
905 		}
906 		DEBUG_LOG("server posted go ahead\n");
907 	}
908 }
909 
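/*
 * Read-latency test: post one RDMA READ of the remote buffer at a
 * time, wait for (or poll for) its completion, and report the total
 * elapsed time for all iterations.
 */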
910 static void rlat_test(struct krping_cb *cb)
911 {
912 	int scnt;
913 	int iters = cb->count;
914 	struct timeval start_tv, stop_tv;
915 	int ret;
916 	struct ib_wc wc;
917 	struct ib_send_wr *bad_wr;
918 	int ne;
919 
920 	scnt = 0;
921 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ;
922 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
923 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
924 	cb->rdma_sq_wr.wr.sg_list->length = cb->size;
925 
926 	microtime(&start_tv);
927 	if (!cb->poll) {
928 		cb->state = RDMA_READ_ADV;
929 		ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
930 	}
931 	while (scnt < iters) {
932 
933 		cb->state = RDMA_READ_ADV;
934 		ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
935 		if (ret) {
936 			printk(KERN_ERR PFX
937 				"Couldn't post send: ret=%d scnt %d\n",
938 				ret, scnt);
939 			return;
940 		}
941 
942 		do {
943 			if (!cb->poll) {
944 				wait_event_interruptible(cb->sem,
945 					cb->state != RDMA_READ_ADV);
946 				if (cb->state == RDMA_READ_COMPLETE) {
947 					ne = 1;
948 					ib_req_notify_cq(cb->cq,
949 						IB_CQ_NEXT_COMP);
950 				} else {
951 					ne = -1;
952 				}
953 			} else
954 				ne = ib_poll_cq(cb->cq, 1, &wc);
955 			if (cb->state == ERROR) {
956 				printk(KERN_ERR PFX
957 					"state == ERROR...bailing scnt %d\n",
958 					scnt);
959 				return;
960 			}
961 		} while (ne == 0);
962 
963 		if (ne < 0) {
964 			printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
965 			return;
966 		}
967 		if (cb->poll && wc.status != IB_WC_SUCCESS) {
968 			printk(KERN_ERR PFX "Completion with error at %s:\n",
969 				cb->server ? "server" : "client");
970 			printk(KERN_ERR PFX "Failed status %d: wr_id %d\n",
971 				wc.status, (int) wc.wr_id);
972 			return;
973 		}
974 		++scnt;
975 	}
976 	microtime(&stop_tv);
977 
978 	if (stop_tv.tv_usec < start_tv.tv_usec) {
979 		stop_tv.tv_usec += 1000000;
980 		stop_tv.tv_sec  -= 1;
981 	}
982 
983 	printk(KERN_ERR PFX "delta sec %lu delta usec %lu iter %d size %d\n",
984 		(unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
985 		(unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
986 		scnt, cb->size);
987 }
988 
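/*
 * Write-latency ping-pong: each side spins until the peer's RDMA WRITE
 * updates its local buffer, then writes the next sequence byte back,
 * recording post and poll cycle counts for the first cycle_iters
 * iterations.
 */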
989 static void wlat_test(struct krping_cb *cb)
990 {
991 	int ccnt, scnt, rcnt;
992 	int iters=cb->count;
993 	volatile char *poll_buf = (char *) cb->start_buf;
994 	char *buf = (char *)cb->rdma_buf;
995 	struct timeval start_tv, stop_tv;
996 	cycles_t *post_cycles_start, *post_cycles_stop;
997 	cycles_t *poll_cycles_start, *poll_cycles_stop;
998 	cycles_t *last_poll_cycles_start;
999 	cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
1000 	int i;
1001 	int cycle_iters = 1000;
1002 
1003 	ccnt = 0;
1004 	scnt = 0;
1005 	rcnt = 0;
1006 
1007 	post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1008 	if (!post_cycles_start) {
1009 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1010 		return;
1011 	}
1012 	post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1013 	if (!post_cycles_stop) {
1014 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1015 		return;
1016 	}
1017 	poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1018 	if (!poll_cycles_start) {
1019 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1020 		return;
1021 	}
1022 	poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1023 	if (!poll_cycles_stop) {
1024 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1025 		return;
1026 	}
1027 	last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1028 		GFP_KERNEL);
1029 	if (!last_poll_cycles_start) {
1030 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1031 		return;
1032 	}
1033 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1034 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
1035 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1036 	cb->rdma_sq_wr.wr.sg_list->length = cb->size;
1037 
1038 	if (cycle_iters > iters)
1039 		cycle_iters = iters;
1040 	microtime(&start_tv);
1041 	while (scnt < iters || ccnt < iters || rcnt < iters) {
1042 
1043 		/* Wait till buffer changes. */
1044 		if (rcnt < iters && !(scnt < 1 && !cb->server)) {
1045 			++rcnt;
1046 			while (*poll_buf != (char)rcnt) {
1047 				if (cb->state == ERROR) {
1048 					printk(KERN_ERR PFX
1049 						"state = ERROR, bailing\n");
1050 					return;
1051 				}
1052 			}
1053 		}
1054 
1055 		if (scnt < iters) {
1056 			struct ib_send_wr *bad_wr;
1057 
1058 			*buf = (char)scnt+1;
1059 			if (scnt < cycle_iters)
1060 				post_cycles_start[scnt] = get_cycles();
1061 			if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1062 				printk(KERN_ERR PFX
1063 					"Couldn't post send: scnt=%d\n",
1064 					scnt);
1065 				return;
1066 			}
1067 			if (scnt < cycle_iters)
1068 				post_cycles_stop[scnt] = get_cycles();
1069 			scnt++;
1070 		}
1071 
1072 		if (ccnt < iters) {
1073 			struct ib_wc wc;
1074 			int ne;
1075 
1076 			if (ccnt < cycle_iters)
1077 				poll_cycles_start[ccnt] = get_cycles();
1078 			do {
1079 				if (ccnt < cycle_iters)
1080 					last_poll_cycles_start[ccnt] =
1081 						get_cycles();
1082 				ne = ib_poll_cq(cb->cq, 1, &wc);
1083 			} while (ne == 0);
1084 			if (ccnt < cycle_iters)
1085 				poll_cycles_stop[ccnt] = get_cycles();
1086 			++ccnt;
1087 
1088 			if (ne < 0) {
1089 				printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1090 				return;
1091 			}
1092 			if (wc.status != IB_WC_SUCCESS) {
1093 				printk(KERN_ERR PFX
1094 					"Completion with error at %s:\n",
1095 					cb->server ? "server" : "client");
1096 				printk(KERN_ERR PFX
1097 					"Failed status %d: wr_id %d\n",
1098 					wc.status, (int) wc.wr_id);
1099 				printk(KERN_ERR PFX
1100 					"scnt=%d, rcnt=%d, ccnt=%d\n",
1101 					scnt, rcnt, ccnt);
1102 				return;
1103 			}
1104 		}
1105 	}
1106 	microtime(&stop_tv);
1107 
1108 	if (stop_tv.tv_usec < start_tv.tv_usec) {
1109 		stop_tv.tv_usec += 1000000;
1110 		stop_tv.tv_sec  -= 1;
1111 	}
1112 
1113 	for (i=0; i < cycle_iters; i++) {
1114 		sum_post += post_cycles_stop[i] - post_cycles_start[i];
1115 		sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1116 		sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1117 	}
1118 	printk(KERN_ERR PFX
1119 		"delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1120 		" sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1121 		(unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
1122 		(unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
1123 		scnt, cb->size, cycle_iters,
1124 		(unsigned long long)sum_post, (unsigned long long)sum_poll,
1125 		(unsigned long long)sum_last_poll);
1126 	kfree(post_cycles_start);
1127 	kfree(post_cycles_stop);
1128 	kfree(poll_cycles_start);
1129 	kfree(poll_cycles_stop);
1130 	kfree(last_poll_cycles_start);
1131 }
1132 
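/*
 * Bandwidth test: keep up to txdepth RDMA WRITEs outstanding and
 * measure the total elapsed time and post/poll cycle counts.
 */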
1133 static void bw_test(struct krping_cb *cb)
1134 {
1135 	int ccnt, scnt, rcnt;
1136 	int iters=cb->count;
1137 	struct timeval start_tv, stop_tv;
1138 	cycles_t *post_cycles_start, *post_cycles_stop;
1139 	cycles_t *poll_cycles_start, *poll_cycles_stop;
1140 	cycles_t *last_poll_cycles_start;
1141 	cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
1142 	int i;
1143 	int cycle_iters = 1000;
1144 
1145 	ccnt = 0;
1146 	scnt = 0;
1147 	rcnt = 0;
1148 
1149 	post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1150 	if (!post_cycles_start) {
1151 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1152 		return;
1153 	}
1154 	post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1155 	if (!post_cycles_stop) {
1156 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1157 		return;
1158 	}
1159 	poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1160 	if (!poll_cycles_start) {
1161 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1162 		return;
1163 	}
1164 	poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1165 	if (!poll_cycles_stop) {
1166 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1167 		return;
1168 	}
1169 	last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1170 		GFP_KERNEL);
1171 	if (!last_poll_cycles_start) {
1172 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1173 		return;
1174 	}
1175 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1176 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
1177 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1178 	cb->rdma_sq_wr.wr.sg_list->length = cb->size;
1179 
1180 	if (cycle_iters > iters)
1181 		cycle_iters = iters;
1182 	microtime(&start_tv);
1183 	while (scnt < iters || ccnt < iters) {
1184 
1185 		while (scnt < iters && scnt - ccnt < cb->txdepth) {
1186 			struct ib_send_wr *bad_wr;
1187 
1188 			if (scnt < cycle_iters)
1189 				post_cycles_start[scnt] = get_cycles();
1190 			if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1191 				printk(KERN_ERR PFX
1192 					"Couldn't post send: scnt=%d\n",
1193 					scnt);
1194 				return;
1195 			}
1196 			if (scnt < cycle_iters)
1197 				post_cycles_stop[scnt] = get_cycles();
1198 			++scnt;
1199 		}
1200 
1201 		if (ccnt < iters) {
1202 			int ne;
1203 			struct ib_wc wc;
1204 
1205 			if (ccnt < cycle_iters)
1206 				poll_cycles_start[ccnt] = get_cycles();
1207 			do {
1208 				if (ccnt < cycle_iters)
1209 					last_poll_cycles_start[ccnt] =
1210 						get_cycles();
1211 				ne = ib_poll_cq(cb->cq, 1, &wc);
1212 			} while (ne == 0);
1213 			if (ccnt < cycle_iters)
1214 				poll_cycles_stop[ccnt] = get_cycles();
1215 			ccnt += 1;
1216 
1217 			if (ne < 0) {
1218 				printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1219 				return;
1220 			}
1221 			if (wc.status != IB_WC_SUCCESS) {
1222 				printk(KERN_ERR PFX
1223 					"Completion with error at %s:\n",
1224 					cb->server ? "server" : "client");
1225 				printk(KERN_ERR PFX
1226 					"Failed status %d: wr_id %d\n",
1227 					wc.status, (int) wc.wr_id);
1228 				return;
1229 			}
1230 		}
1231 	}
1232 	microtime(&stop_tv);
1233 
1234 	if (stop_tv.tv_usec < start_tv.tv_usec) {
1235 		stop_tv.tv_usec += 1000000;
1236 		stop_tv.tv_sec  -= 1;
1237 	}
1238 
1239 	for (i=0; i < cycle_iters; i++) {
1240 		sum_post += post_cycles_stop[i] - post_cycles_start[i];
1241 		sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1242 		sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1243 	}
1244 	printk(KERN_ERR PFX
1245 		"delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1246 		" sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1247 		(unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
1248 		(unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
1249 		scnt, cb->size, cycle_iters,
1250 		(unsigned long long)sum_post, (unsigned long long)sum_poll,
1251 		(unsigned long long)sum_last_poll);
1252 	kfree(post_cycles_start);
1253 	kfree(post_cycles_stop);
1254 	kfree(poll_cycles_start);
1255 	kfree(poll_cycles_stop);
1256 	kfree(last_poll_cycles_start);
1257 }
1258 
1259 static void krping_rlat_test_server(struct krping_cb *cb)
1260 {
1261 	struct ib_send_wr *bad_wr;
1262 	struct ib_wc wc;
1263 	int ret;
1264 
1265 	/* Spin waiting for client's Start STAG/TO/Len */
1266 	while (cb->state < RDMA_READ_ADV) {
1267 		krping_cq_event_handler(cb->cq, cb);
1268 	}
1269 
1270 	/* Send STAG/TO/Len to client */
1271 	krping_format_send(cb, cb->start_dma_addr);
1272 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1273 	if (ret) {
1274 		printk(KERN_ERR PFX "post send error %d\n", ret);
1275 		return;
1276 	}
1277 
1278 	/* Spin waiting for send completion */
1279 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1280 	if (ret < 0) {
1281 		printk(KERN_ERR PFX "poll error %d\n", ret);
1282 		return;
1283 	}
1284 	if (wc.status) {
1285 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1286 		return;
1287 	}
1288 
1289 	wait_event_interruptible(cb->sem, cb->state == ERROR);
1290 }
1291 
1292 static void krping_wlat_test_server(struct krping_cb *cb)
1293 {
1294 	struct ib_send_wr *bad_wr;
1295 	struct ib_wc wc;
1296 	int ret;
1297 
1298 	/* Spin waiting for client's Start STAG/TO/Len */
1299 	while (cb->state < RDMA_READ_ADV) {
1300 		krping_cq_event_handler(cb->cq, cb);
1301 	}
1302 
1303 	/* Send STAG/TO/Len to client */
1304 	krping_format_send(cb, cb->start_dma_addr);
1305 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1306 	if (ret) {
1307 		printk(KERN_ERR PFX "post send error %d\n", ret);
1308 		return;
1309 	}
1310 
1311 	/* Spin waiting for send completion */
1312 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1313 	if (ret < 0) {
1314 		printk(KERN_ERR PFX "poll error %d\n", ret);
1315 		return;
1316 	}
1317 	if (wc.status) {
1318 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1319 		return;
1320 	}
1321 
1322 	wlat_test(cb);
1323 	wait_event_interruptible(cb->sem, cb->state == ERROR);
1324 }
1325 
1326 static void krping_bw_test_server(struct krping_cb *cb)
1327 {
1328 	struct ib_send_wr *bad_wr;
1329 	struct ib_wc wc;
1330 	int ret;
1331 
1332 	/* Spin waiting for client's Start STAG/TO/Len */
1333 	while (cb->state < RDMA_READ_ADV) {
1334 		krping_cq_event_handler(cb->cq, cb);
1335 	}
1336 
1337 	/* Send STAG/TO/Len to client */
1338 	krping_format_send(cb, cb->start_dma_addr);
1339 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1340 	if (ret) {
1341 		printk(KERN_ERR PFX "post send error %d\n", ret);
1342 		return;
1343 	}
1344 
1345 	/* Spin waiting for send completion */
1346 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1347 	if (ret < 0) {
1348 		printk(KERN_ERR PFX "poll error %d\n", ret);
1349 		return;
1350 	}
1351 	if (wc.status) {
1352 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1353 		return;
1354 	}
1355 
1356 	if (cb->duplex)
1357 		bw_test(cb);
1358 	wait_event_interruptible(cb->sem, cb->state == ERROR);
1359 }
1360 
1361 static int reg_supported(struct ib_device *dev)
1362 {
1363 	u64 needed_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
1364 
1365 	if ((dev->attrs.device_cap_flags & needed_flags) != needed_flags) {
1366 		printk(KERN_ERR PFX
1367 			"Fastreg not supported - device_cap_flags 0x%llx\n",
1368 			(unsigned long long)dev->attrs.device_cap_flags);
1369 		return 0;
1370 	}
1371 	DEBUG_LOG("Fastreg supported - device_cap_flags 0x%llx\n",
1372 		(unsigned long long)dev->attrs.device_cap_flags);
1373 	return 1;
1374 }
1375 
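/*
 * Build an IPv4 or IPv6 sockaddr from the address bytes and port
 * already stored in network byte order in the control block.
 */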
1376 static void fill_sockaddr(struct sockaddr_storage *sin, struct krping_cb *cb)
1377 {
1378 	memset(sin, 0, sizeof(*sin));
1379 
1380 	if (cb->addr_type == AF_INET) {
1381 		struct sockaddr_in *sin4 = (struct sockaddr_in *)sin;
1382 		sin4->sin_len = sizeof(*sin4);
1383 		sin4->sin_family = AF_INET;
1384 		memcpy((void *)&sin4->sin_addr.s_addr, cb->addr, 4);
1385 		sin4->sin_port = cb->port;
1386 	} else if (cb->addr_type == AF_INET6) {
1387 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sin;
1388 		sin6->sin6_len = sizeof(*sin6);
1389 		sin6->sin6_family = AF_INET6;
1390 		memcpy((void *)&sin6->sin6_addr, cb->addr, 16);
1391 		sin6->sin6_port = cb->port;
1392 	}
1393 }
1394 
1395 static int krping_bind_server(struct krping_cb *cb)
1396 {
1397 	struct sockaddr_storage sin;
1398 	int ret;
1399 
1400 
1401 	fill_sockaddr(&sin, cb);
1402 
1403 	ret = rdma_bind_addr(cb->cm_id, (struct sockaddr *)&sin);
1404 	if (ret) {
1405 		printk(KERN_ERR PFX "rdma_bind_addr error %d\n", ret);
1406 		return ret;
1407 	}
1408 	DEBUG_LOG("rdma_bind_addr successful\n");
1409 
1410 	DEBUG_LOG("rdma_listen\n");
1411 	ret = rdma_listen(cb->cm_id, 3);
1412 	if (ret) {
1413 		printk(KERN_ERR PFX "rdma_listen failed: %d\n", ret);
1414 		return ret;
1415 	}
1416 
1417 	wait_event_interruptible(cb->sem, cb->state >= CONNECT_REQUEST);
1418 	if (cb->state != CONNECT_REQUEST) {
1419 		printk(KERN_ERR PFX "wait for CONNECT_REQUEST state %d\n",
1420 			cb->state);
1421 		return -1;
1422 	}
1423 
1424 	if (!reg_supported(cb->child_cm_id->device))
1425 		return -EINVAL;
1426 
1427 	return 0;
1428 }
1429 
1430 static void krping_run_server(struct krping_cb *cb)
1431 {
1432 	struct ib_recv_wr *bad_wr;
1433 	int ret;
1434 
1435 	ret = krping_bind_server(cb);
1436 	if (ret)
1437 		return;
1438 
1439 	ret = krping_setup_qp(cb, cb->child_cm_id);
1440 	if (ret) {
1441 		printk(KERN_ERR PFX "setup_qp failed: %d\n", ret);
1442 		goto err0;
1443 	}
1444 
1445 	ret = krping_setup_buffers(cb);
1446 	if (ret) {
1447 		printk(KERN_ERR PFX "krping_setup_buffers failed: %d\n", ret);
1448 		goto err1;
1449 	}
1450 
1451 	ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
1452 	if (ret) {
1453 		printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
1454 		goto err2;
1455 	}
1456 
1457 	ret = krping_accept(cb);
1458 	if (ret) {
1459 		printk(KERN_ERR PFX "connect error %d\n", ret);
1460 		goto err2;
1461 	}
1462 
1463 	if (cb->wlat)
1464 		krping_wlat_test_server(cb);
1465 	else if (cb->rlat)
1466 		krping_rlat_test_server(cb);
1467 	else if (cb->bw)
1468 		krping_bw_test_server(cb);
1469 	else
1470 		krping_test_server(cb);
1471 	rdma_disconnect(cb->child_cm_id);
1472 err2:
1473 	krping_free_buffers(cb);
1474 err1:
1475 	krping_free_qp(cb);
1476 err0:
1477 	rdma_destroy_id(cb->child_cm_id);
1478 }
1479 
1480 static void krping_test_client(struct krping_cb *cb)
1481 {
1482 	int ping, start, cc, i, ret;
1483 	struct ib_send_wr *bad_wr;
1484 	unsigned char c;
1485 
1486 	start = 65;
1487 	for (ping = 0; !cb->count || ping < cb->count; ping++) {
1488 		cb->state = RDMA_READ_ADV;
1489 
1490 		/* Put some ascii text in the buffer. */
1491 		cc = sprintf(cb->start_buf, "rdma-ping-%d: ", ping);
1492 		for (i = cc, c = start; i < cb->size; i++) {
1493 			cb->start_buf[i] = c;
1494 			c++;
1495 			if (c > 122)
1496 				c = 65;
1497 		}
1498 		start++;
1499 		if (start > 122)
1500 			start = 65;
1501 		cb->start_buf[cb->size - 1] = 0;
1502 
1503 		krping_format_send(cb, cb->start_dma_addr);
1504 		if (cb->state == ERROR) {
1505 			printk(KERN_ERR PFX "krping_format_send failed\n");
1506 			break;
1507 		}
1508 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1509 		if (ret) {
1510 			printk(KERN_ERR PFX "post send error %d\n", ret);
1511 			break;
1512 		}
1513 
1514 		/* Wait for server to ACK */
1515 		wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
1516 		if (cb->state != RDMA_WRITE_ADV) {
1517 			printk(KERN_ERR PFX
1518 			       "wait for RDMA_WRITE_ADV state %d\n",
1519 			       cb->state);
1520 			break;
1521 		}
1522 
1523 		krping_format_send(cb, cb->rdma_dma_addr);
1524 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1525 		if (ret) {
1526 			printk(KERN_ERR PFX "post send error %d\n", ret);
1527 			break;
1528 		}
1529 
1530 		/* Wait for the server to say the RDMA Write is complete. */
1531 		wait_event_interruptible(cb->sem,
1532 					 cb->state >= RDMA_WRITE_COMPLETE);
1533 		if (cb->state != RDMA_WRITE_COMPLETE) {
1534 			printk(KERN_ERR PFX
1535 			       "wait for RDMA_WRITE_COMPLETE state %d\n",
1536 			       cb->state);
1537 			break;
1538 		}
1539 
1540 		if (cb->validate)
1541 			if (memcmp(cb->start_buf, cb->rdma_buf, cb->size)) {
1542 				printk(KERN_ERR PFX "data mismatch!\n");
1543 				break;
1544 			}
1545 
1546 		if (cb->verbose)
1547 			printk(KERN_INFO PFX "ping data: %s\n", cb->rdma_buf);
1548 #ifdef SLOW_KRPING
1549 		wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
1550 #endif
1551 	}
1552 }
1553 
1554 static void krping_rlat_test_client(struct krping_cb *cb)
1555 {
1556 	struct ib_send_wr *bad_wr;
1557 	struct ib_wc wc;
1558 	int ret;
1559 
1560 	cb->state = RDMA_READ_ADV;
1561 
1562 	/* Send STAG/TO/Len to the server */
1563 	krping_format_send(cb, cb->start_dma_addr);
1564 	if (cb->state == ERROR) {
1565 		printk(KERN_ERR PFX "krping_format_send failed\n");
1566 		return;
1567 	}
1568 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1569 	if (ret) {
1570 		printk(KERN_ERR PFX "post send error %d\n", ret);
1571 		return;
1572 	}
1573 
1574 	/* Spin waiting for send completion */
1575 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1576 	if (ret < 0) {
1577 		printk(KERN_ERR PFX "poll error %d\n", ret);
1578 		return;
1579 	}
1580 	if (wc.status) {
1581 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1582 		return;
1583 	}
1584 
1585 	/* Spin waiting for server's Start STAG/TO/Len */
1586 	while (cb->state < RDMA_WRITE_ADV) {
1587 		krping_cq_event_handler(cb->cq, cb);
1588 	}
1589 
1590 #if 0
1591 {
1592 	int i;
1593 	struct timeval start, stop;
1594 	time_t sec;
1595 	suseconds_t usec;
1596 	unsigned long long elapsed;
1597 	struct ib_wc wc;
1598 	struct ib_send_wr *bad_wr;
1599 	int ne;
1600 
1601 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1602 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
1603 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1604 	cb->rdma_sq_wr.wr.sg_list->length = 0;
1605 	cb->rdma_sq_wr.wr.num_sge = 0;
1606 
1607 	microtime(&start);
1608 	for (i=0; i < 100000; i++) {
1609 		if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1610 			printk(KERN_ERR PFX  "Couldn't post send\n");
1611 			return;
1612 		}
1613 		do {
1614 			ne = ib_poll_cq(cb->cq, 1, &wc);
1615 		} while (ne == 0);
1616 		if (ne < 0) {
1617 			printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1618 			return;
1619 		}
1620 		if (wc.status != IB_WC_SUCCESS) {
1621 			printk(KERN_ERR PFX "Completion with error at %s:\n",
1622 				cb->server ? "server" : "client");
1623 			printk(KERN_ERR PFX "Failed status %d: wr_id %d\n",
1624 				wc.status, (int) wc.wr_id);
1625 			return;
1626 		}
1627 	}
1628 	microtime(&stop);
1629 
1630 	if (stop.tv_usec < start.tv_usec) {
1631 		stop.tv_usec += 1000000;
1632 		stop.tv_sec  -= 1;
1633 	}
1634 	sec     = stop.tv_sec - start.tv_sec;
1635 	usec    = stop.tv_usec - start.tv_usec;
1636 	elapsed = sec * 1000000 + usec;
1637 	printk(KERN_ERR PFX "0B-write-lat iters 100000 usec %llu\n", elapsed);
1638 }
1639 #endif
1640 
1641 	rlat_test(cb);
1642 }
1643 
1644 static void krping_wlat_test_client(struct krping_cb *cb)
1645 {
1646 	struct ib_send_wr *bad_wr;
1647 	struct ib_wc wc;
1648 	int ret;
1649 
1650 	cb->state = RDMA_READ_ADV;
1651 
1652 	/* Send STAG/TO/Len to the server */
1653 	krping_format_send(cb, cb->start_dma_addr);
1654 	if (cb->state == ERROR) {
1655 		printk(KERN_ERR PFX "krping_format_send failed\n");
1656 		return;
1657 	}
1658 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1659 	if (ret) {
1660 		printk(KERN_ERR PFX "post send error %d\n", ret);
1661 		return;
1662 	}
1663 
1664 	/* Spin waiting for send completion */
1665 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1666 	if (ret < 0) {
1667 		printk(KERN_ERR PFX "poll error %d\n", ret);
1668 		return;
1669 	}
1670 	if (wc.status) {
1671 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1672 		return;
1673 	}
1674 
1675 	/* Spin waiting for server's Start STAG/TO/Len */
1676 	while (cb->state < RDMA_WRITE_ADV) {
1677 		krping_cq_event_handler(cb->cq, cb);
1678 	}
1679 
1680 	wlat_test(cb);
1681 }
1682 
1683 static void krping_bw_test_client(struct krping_cb *cb)
1684 {
1685 	struct ib_send_wr *bad_wr;
1686 	struct ib_wc wc;
1687 	int ret;
1688 
1689 	cb->state = RDMA_READ_ADV;
1690 
1691 	/* Send STAG/TO/Len to the server */
1692 	krping_format_send(cb, cb->start_dma_addr);
1693 	if (cb->state == ERROR) {
1694 		printk(KERN_ERR PFX "krping_format_send failed\n");
1695 		return;
1696 	}
1697 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1698 	if (ret) {
1699 		printk(KERN_ERR PFX "post send error %d\n", ret);
1700 		return;
1701 	}
1702 
1703 	/* Spin waiting for send completion */
1704 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1705 	if (ret < 0) {
1706 		printk(KERN_ERR PFX "poll error %d\n", ret);
1707 		return;
1708 	}
1709 	if (wc.status) {
1710 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1711 		return;
1712 	}
1713 
1714 	/* Spin waiting for server's Start STAG/TO/Len */
1715 	while (cb->state < RDMA_WRITE_ADV) {
1716 		krping_cq_event_handler(cb->cq, cb);
1717 	}
1718 
1719 	bw_test(cb);
1720 }
1721 
1722 /*
1723  * Manual qp flush test
1724  */
1725 static void flush_qp(struct krping_cb *cb)
1726 {
1727 	struct ib_send_wr wr = { 0 }, *bad;
1728 	struct ib_recv_wr recv_wr = { 0 }, *recv_bad;
1729 	struct ib_wc wc;
1730 	int ret;
1731 	int flushed = 0;
1732 	int ccnt = 0;
1733 
1734 	rdma_disconnect(cb->cm_id);
1735 	DEBUG_LOG("disconnected!\n");
1736 
1737 	wr.opcode = IB_WR_SEND;
1738 	wr.wr_id = 0xdeadbeefcafebabe;
1739 	ret = ib_post_send(cb->qp, &wr, &bad);
1740 	if (ret) {
1741 		printk(KERN_ERR PFX "%s post_send failed ret %d\n", __func__, ret);
1742 		return;
1743 	}
1744 
1745 	recv_wr.wr_id = 0xcafebabedeadbeef;
1746 	ret = ib_post_recv(cb->qp, &recv_wr, &recv_bad);
1747 	if (ret) {
1748 		printk(KERN_ERR PFX "%s post_recv failed ret %d\n", __func__, ret);
1749 		return;
1750 	}
1751 
1752 	/* poll until the flush WRs complete */
1753 	do {
1754 		ret = ib_poll_cq(cb->cq, 1, &wc);
1755 		if (ret < 0) {
1756 			printk(KERN_ERR PFX "ib_poll_cq failed %d\n", ret);
1757 			return;
1758 		}
1759 		if (ret == 0)
1760 			continue;
1761 		ccnt++;
1762 		if (wc.wr_id == 0xdeadbeefcafebabe ||
1763 		    wc.wr_id == 0xcafebabedeadbeef)
1764 			flushed++;
1765 	} while (flushed != 2);
1766 	DEBUG_LOG("qp_flushed! ccnt %u\n", ccnt);
1767 }
1768 
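/*
 * Fast-registration stress test: repeatedly post REG_MR + LOCAL_INV
 * chains against a dummy address with random lengths, keeping up to
 * half the SQ depth in flight, and count the signaled completions.
 */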
1769 static void krping_fr_test(struct krping_cb *cb)
1770 {
1771 	struct ib_send_wr inv, *bad;
1772 	struct ib_reg_wr fr;
1773 	struct ib_wc wc;
1774 	u8 key = 0;
1775 	struct ib_mr *mr;
1776 	int ret;
1777 	int size = cb->size;
1778 	int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
1779 	unsigned long start;
1780 	int count = 0;
1781 	int scnt = 0;
1782 	struct scatterlist sg = {0};
1783 
1784 	mr = ib_alloc_mr(cb->pd, IB_MR_TYPE_MEM_REG, plen);
1785 	if (IS_ERR(mr)) {
1786 		printk(KERN_ERR PFX "ib_alloc_mr failed %ld\n", PTR_ERR(mr));
1787 		return;
1788 	}
1789 
1790 	sg_dma_address(&sg) = (dma_addr_t)0xcafebabe0000ULL;
1791 	sg_dma_len(&sg) = size;
1792 	ret = ib_map_mr_sg(mr, &sg, 1, NULL, PAGE_SIZE);
1793 	if (ret <= 0) {
1794 		printk(KERN_ERR PFX "ib_map_mr_sg err %d\n", ret);
1795 		goto err2;
1796 	}
1797 
1798 	memset(&fr, 0, sizeof fr);
1799 	fr.wr.opcode = IB_WR_REG_MR;
1800 	fr.access = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
1801 	fr.mr = mr;
1802 	fr.wr.next = &inv;
1803 
1804 	memset(&inv, 0, sizeof inv);
1805 	inv.opcode = IB_WR_LOCAL_INV;
1806 	inv.send_flags = IB_SEND_SIGNALED;
1807 
1808 	DEBUG_LOG("fr_test: stag index 0x%x plen %u size %u depth %u\n", mr->rkey >> 8, plen, cb->size, cb->txdepth);
1809 	start = time_uptime;
1810 	while (!cb->count || count <= cb->count) {
1811 		if (SIGPENDING(curthread)) {
1812 			printk(KERN_ERR PFX "signal!\n");
1813 			break;
1814 		}
1815 		if ((time_uptime - start) >= 9) {
1816 			DEBUG_LOG("fr_test: pausing 1 second! count %u latest size %u plen %u\n", count, size, plen);
1817 			wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
1818 			if (cb->state == ERROR)
1819 				break;
1820 			start = time_uptime;
1821 		}
1822 		while (scnt < (cb->txdepth>>1)) {
1823 			ib_update_fast_reg_key(mr, ++key);
1824 			fr.key = mr->rkey;
1825 			inv.ex.invalidate_rkey = mr->rkey;
1826 
1827 			size = arc4random() % cb->size;
1828 			if (size == 0)
1829 				size = cb->size;
1830 			sg_dma_len(&sg) = size;
1831 			ret = ib_map_mr_sg(mr, &sg, 1, NULL, PAGE_SIZE);
1832 			if (ret <= 0) {
1833 				printk(KERN_ERR PFX "ib_map_mr_sg err %d\n", ret);
1834 				goto err2;
1835 			}
1836 			ret = ib_post_send(cb->qp, &fr.wr, &bad);
1837 			if (ret) {
1838 				printk(KERN_ERR PFX "ib_post_send failed %d\n", ret);
1839 				goto err2;
1840 			}
1841 			scnt++;
1842 		}
1843 
1844 		ret = ib_poll_cq(cb->cq, 1, &wc);
1845 		if (ret < 0) {
1846 			printk(KERN_ERR PFX "ib_poll_cq failed %d\n", ret);
1847 			goto err2;
1848 		}
1849 		if (ret == 1) {
1850 			if (wc.status) {
1851 				printk(KERN_ERR PFX "completion error %u\n", wc.status);
1852 				goto err2;
1853 			}
1854 			count++;
1855 			scnt--;
1856 		}
1857 	}
1858 err2:
1859 	flush_qp(cb);
1860 	DEBUG_LOG("fr_test: done!\n");
1861 	ib_dereg_mr(mr);
1862 }
1863 
1864 static int krping_connect_client(struct krping_cb *cb)
1865 {
1866 	struct rdma_conn_param conn_param;
1867 	int ret;
1868 
1869 	memset(&conn_param, 0, sizeof conn_param);
1870 	conn_param.responder_resources = 1;
1871 	conn_param.initiator_depth = 1;
1872 	conn_param.retry_count = 10;
1873 
1874 	ret = rdma_connect(cb->cm_id, &conn_param);
1875 	if (ret) {
1876 		printk(KERN_ERR PFX "rdma_connect error %d\n", ret);
1877 		return ret;
1878 	}
1879 
1880 	wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
1881 	if (cb->state == ERROR) {
1882 		printk(KERN_ERR PFX "wait for CONNECTED state %d\n", cb->state);
1883 		return -1;
1884 	}
1885 
1886 	DEBUG_LOG("rdma_connect successful\n");
1887 	return 0;
1888 }
1889 
1890 static int krping_bind_client(struct krping_cb *cb)
1891 {
1892 	struct sockaddr_storage sin;
1893 	int ret;
1894 
1895 	fill_sockaddr(&sin, cb);
1896 
1897 	ret = rdma_resolve_addr(cb->cm_id, NULL, (struct sockaddr *)&sin, 2000);
1898 	if (ret) {
1899 		printk(KERN_ERR PFX "rdma_resolve_addr error %d\n", ret);
1900 		return ret;
1901 	}
1902 
1903 	wait_event_interruptible(cb->sem, cb->state >= ROUTE_RESOLVED);
1904 	if (cb->state != ROUTE_RESOLVED) {
1905 		printk(KERN_ERR PFX
1906 		       "addr/route resolution did not resolve: state %d\n",
1907 		       cb->state);
1908 		return -EINTR;
1909 	}
1910 
1911 	if (!reg_supported(cb->cm_id->device))
1912 		return -EINVAL;
1913 
1914 	DEBUG_LOG("rdma_resolve_addr - rdma_resolve_route successful\n");
1915 	return 0;
1916 }
1917 
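/*
 * Client side of a krping run: resolve the address, set up the QP and
 * buffers, post the initial receive, connect, run the selected test, and
 * tear everything down again.
 */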
1918 static void krping_run_client(struct krping_cb *cb)
1919 {
1920 	struct ib_recv_wr *bad_wr;
1921 	int ret;
1922 
1923 	/* set type of service, if any */
1924 	if (cb->tos != 0)
1925 		rdma_set_service_type(cb->cm_id, cb->tos);
1926 
1927 	ret = krping_bind_client(cb);
1928 	if (ret)
1929 		return;
1930 
1931 	ret = krping_setup_qp(cb, cb->cm_id);
1932 	if (ret) {
1933 		printk(KERN_ERR PFX "setup_qp failed: %d\n", ret);
1934 		return;
1935 	}
1936 
1937 	ret = krping_setup_buffers(cb);
1938 	if (ret) {
1939 		printk(KERN_ERR PFX "krping_setup_buffers failed: %d\n", ret);
1940 		goto err1;
1941 	}
1942 
1943 	ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
1944 	if (ret) {
1945 		printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
1946 		goto err2;
1947 	}
1948 
1949 	ret = krping_connect_client(cb);
1950 	if (ret) {
1951 		printk(KERN_ERR PFX "connect error %d\n", ret);
1952 		goto err2;
1953 	}
1954 
1955 	if (cb->wlat)
1956 		krping_wlat_test_client(cb);
1957 	else if (cb->rlat)
1958 		krping_rlat_test_client(cb);
1959 	else if (cb->bw)
1960 		krping_bw_test_client(cb);
1961 	else if (cb->frtest)
1962 		krping_fr_test(cb);
1963 	else
1964 		krping_test_client(cb);
1965 	rdma_disconnect(cb->cm_id);
1966 err2:
1967 	krping_free_buffers(cb);
1968 err1:
1969 	krping_free_qp(cb);
1970 }
1971 
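/*
 * Translate an interface name (the "%<ifname>" suffix of a link-local
 * IPv6 address) into its interface index, or 0 if it cannot be found.
 */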
1972 static uint16_t
1973 krping_get_ipv6_scope_id(char *name)
1974 {
1975 	struct ifnet *ifp;
1976 	uint16_t retval;
1977 
1978 	if (name == NULL)
1979 		return (0);
1980 	CURVNET_SET_QUIET(TD_TO_VNET(curthread));
1981 	ifp = ifunit_ref(name);
1982 	CURVNET_RESTORE();
1983 	if (ifp == NULL)
1984 		return (0);
1985 	retval = ifp->if_index;
1986 	if_rele(ifp);
1987 	return (retval);
1988 }
1989 
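/*
 * Parse a krping command string, register a control block for the run,
 * validate the selected options, then execute either the server or client
 * side over a freshly created RDMA CM id.
 */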
1990 int krping_doit(char *cmd)
1991 {
1992 	struct krping_cb *cb;
1993 	int op;
1994 	int ret = 0;
1995 	char *optarg;
1996 	char *scope;
1997 	unsigned long optint;
1998 
1999 	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
2000 	if (!cb)
2001 		return -ENOMEM;
2002 
2003 	mutex_lock(&krping_mutex);
2004 	list_add_tail(&cb->list, &krping_cbs);
2005 	mutex_unlock(&krping_mutex);
2006 
2007 	cb->server = -1;
2008 	cb->state = IDLE;
2009 	cb->size = 64;
2010 	cb->txdepth = RPING_SQ_DEPTH;
2011 	init_waitqueue_head(&cb->sem);
2012 
2013 	while ((op = krping_getopt("krping", &cmd, krping_opts, NULL, &optarg,
2014 			      &optint)) != 0) {
2015 		switch (op) {
2016 		case 'a':
2017 			cb->addr_str = optarg;
2018 			cb->addr_type = AF_INET;
2019 			DEBUG_LOG("ipaddr (%s)\n", optarg);
2020 			if (inet_pton(AF_INET, optarg, cb->addr) != 1) {
2021 				printk(KERN_ERR PFX "bad addr string %s\n",
2022 				    optarg);
2023 				ret = EINVAL;
2024 			}
2025 			break;
2026 		case 'A':
2027 			cb->addr_str = optarg;
2028 			cb->addr_type = AF_INET6;
2029 			DEBUG_LOG("ipv6addr (%s)\n", optarg);
2030 			scope = strstr(optarg, "%");
2031 			/* extract scope ID, if any */
2032 			if (scope != NULL)
2033 				*scope++ = 0;
2034 			/* extract IPv6 network address */
2035 			if (inet_pton(AF_INET6, optarg, cb->addr) != 1) {
2036 				printk(KERN_ERR PFX "bad addr string %s\n",
2037 				    optarg);
2038 				ret = EINVAL;
2039 			} else if (IN6_IS_SCOPE_LINKLOCAL((struct in6_addr *)cb->addr) ||
2040 			    IN6_IS_ADDR_MC_INTFACELOCAL((struct in6_addr *)cb->addr)) {
2041 				uint16_t scope_id = krping_get_ipv6_scope_id(scope);
2042 				DEBUG_LOG("ipv6 scope ID = %d\n", scope_id);
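				/*
				 * Embed the scope ID in bytes 2 and 3 of the
				 * address (KAME-style embedded scope for
				 * link-local addresses).
				 */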
2043 				cb->addr[2] = scope_id >> 8;
2044 				cb->addr[3] = scope_id & 0xFF;
2045 			}
2046 			break;
2047 		case 'p':
2048 			cb->port = htons(optint);
2049 			DEBUG_LOG("port %d\n", (int)optint);
2050 			break;
2051 		case 'P':
2052 			cb->poll = 1;
2053 			DEBUG_LOG("poll\n");
2054 			break;
2055 		case 's':
2056 			cb->server = 1;
2057 			DEBUG_LOG("server\n");
2058 			break;
2059 		case 'c':
2060 			cb->server = 0;
2061 			DEBUG_LOG("client\n");
2062 			break;
2063 		case 'S':
2064 			cb->size = optint;
2065 			if ((cb->size < 1) ||
2066 			    (cb->size > RPING_BUFSIZE)) {
2067 				printk(KERN_ERR PFX "Invalid size %d "
2068 				       "(valid range is 1 to %d)\n",
2069 				       cb->size, RPING_BUFSIZE);
2070 				ret = EINVAL;
2071 			} else
2072 				DEBUG_LOG("size %d\n", (int)optint);
2073 			break;
2074 		case 'C':
2075 			cb->count = optint;
2076 			if (cb->count < 0) {
2077 				printk(KERN_ERR PFX "Invalid count %d\n",
2078 					cb->count);
2079 				ret = EINVAL;
2080 			} else
2081 				DEBUG_LOG("count %d\n", (int) cb->count);
2082 			break;
2083 		case 'v':
2084 			cb->verbose++;
2085 			DEBUG_LOG("verbose\n");
2086 			break;
2087 		case 'V':
2088 			cb->validate++;
2089 			DEBUG_LOG("validate data\n");
2090 			break;
2091 		case 'l':
2092 			cb->wlat++;
2093 			break;
2094 		case 'L':
2095 			cb->rlat++;
2096 			break;
2097 		case 'B':
2098 			cb->bw++;
2099 			break;
2100 		case 'd':
2101 			cb->duplex++;
2102 			break;
2103 		case 'I':
2104 			cb->server_invalidate = 1;
2105 			break;
2106 		case 't':
2107 			cb->tos = optint;
2108 			DEBUG_LOG("type of service, tos=%d\n", (int) cb->tos);
2109 			break;
2110 		case 'T':
2111 			cb->txdepth = optint;
2112 			DEBUG_LOG("txdepth %d\n", (int) cb->txdepth);
2113 			break;
2114 		case 'Z':
2115 			cb->local_dma_lkey = 1;
2116 			DEBUG_LOG("using local dma lkey\n");
2117 			break;
2118 		case 'R':
2119 			cb->read_inv = 1;
2120 			DEBUG_LOG("using read-with-inv\n");
2121 			break;
2122 		case 'f':
2123 			cb->frtest = 1;
2124 			DEBUG_LOG("fast-reg test!\n");
2125 			break;
2126 		default:
2127 			printk(KERN_ERR PFX "unknown opt %s\n", optarg);
2128 			ret = -EINVAL;
2129 			break;
2130 		}
2131 	}
2132 	if (ret)
2133 		goto out;
2134 
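	/*
	 * Sanity-check the parsed options before creating any RDMA
	 * resources: a role (client or server) must have been chosen,
	 * frtest is client-only, at most one test may be selected, and the
	 * wlat/rlat/bw tests are rejected because they relied on the
	 * removed MR memory mode.
	 */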
2135 	if (cb->server == -1) {
2136 		printk(KERN_ERR PFX "must be either client or server\n");
2137 		ret = -EINVAL;
2138 		goto out;
2139 	}
2140 
2141 	if (cb->server && cb->frtest) {
2142 		printk(KERN_ERR PFX "must be client to run frtest\n");
2143 		ret = -EINVAL;
2144 		goto out;
2145 	}
2146 
2147 	if ((cb->frtest + cb->bw + cb->rlat + cb->wlat) > 1) {
2148 		printk(KERN_ERR PFX "Pick only one test: fr, bw, rlat, wlat\n");
2149 		ret = -EINVAL;
2150 		goto out;
2151 	}
2152 
2153 	if (cb->wlat || cb->rlat || cb->bw) {
2154 		printk(KERN_ERR PFX "wlat, rlat, and bw tests only support mem_mode MR - which is no longer supported\n");
2155 		ret = -EINVAL;
2156 		goto out;
2157 	}
2158 
2159 	cb->cm_id = rdma_create_id(TD_TO_VNET(curthread), krping_cma_event_handler, cb, RDMA_PS_TCP, IB_QPT_RC);
2160 	if (IS_ERR(cb->cm_id)) {
2161 		ret = PTR_ERR(cb->cm_id);
2162 		printk(KERN_ERR PFX "rdma_create_id error %d\n", ret);
2163 		goto out;
2164 	}
2165 	DEBUG_LOG("created cm_id %p\n", cb->cm_id);
2166 
2167 	if (cb->server)
2168 		krping_run_server(cb);
2169 	else
2170 		krping_run_client(cb);
2171 
2172 	DEBUG_LOG("destroy cm_id %p\n", cb->cm_id);
2173 	rdma_destroy_id(cb->cm_id);
2174 out:
2175 	mutex_lock(&krping_mutex);
2176 	list_del(&cb->list);
2177 	mutex_unlock(&krping_mutex);
2178 	kfree(cb);
2179 	return ret;
2180 }
2181 
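/*
 * Invoke 'f' on every active krping session while holding krping_mutex,
 * passing the session's statistics, or NULL if its PD has not been
 * allocated yet.
 */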
2182 void
2183 krping_walk_cb_list(void (*f)(struct krping_stats *, void *), void *arg)
2184 {
2185 	struct krping_cb *cb;
2186 
2187 	mutex_lock(&krping_mutex);
2188 	list_for_each_entry(cb, &krping_cbs, list)
2189 	    (*f)(cb->pd ? &cb->stats : NULL, arg);
2190 	mutex_unlock(&krping_mutex);
2191 }
2192