xref: /freebsd/sys/contrib/rdma/krping/krping.c (revision 5bf5ca772c6de2d53344a78cf461447cc322ccea)
1 /*
2  * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3  * Copyright (c) 2006-2009 Open Grid Computing, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <linux/module.h>
38 #include <linux/moduleparam.h>
39 #include <linux/slab.h>
40 #include <linux/err.h>
41 #include <linux/string.h>
42 #include <linux/list.h>
43 #include <linux/in.h>
44 #include <linux/device.h>
45 #include <linux/pci.h>
46 #include <linux/sched.h>
47 #include <linux/wait.h>
48 
49 #include <asm/atomic.h>
50 
51 #include <rdma/ib_verbs.h>
52 #include <rdma/rdma_cm.h>
53 
54 #include "krping.h"
55 #include "getopt.h"
56 
57 #define PFX "krping: "
58 
59 extern int krping_debug;
60 #define DEBUG_LOG(...) do { if (krping_debug) log(LOG_INFO, __VA_ARGS__); } while (0)
61 #define BIND_INFO 1
62 
63 MODULE_AUTHOR("Steve Wise");
64 MODULE_DESCRIPTION("RDMA ping server");
65 MODULE_LICENSE("Dual BSD/GPL");
66 MODULE_VERSION(krping, 1);
67 MODULE_DEPEND(krping, linuxkpi, 1, 1, 1);
68 
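/*
 * Read the CPU timestamp counter (x86 rdtsc).  The wlat/bw tests below
 * use raw TSC deltas to report per-WR post and poll overhead.
 */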
69 static __inline uint64_t
70 get_cycles(void)
71 {
72 	uint32_t low, high;
73 	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
74 	return (low | ((u_int64_t)high << 32));
75 }
76 
77 typedef uint64_t cycles_t;
78 
79 enum mem_type {
80 	DMA = 1,
81 	REG = 2,
82 };
83 
84 static const struct krping_option krping_opts[] = {
85 	{"count", OPT_INT, 'C'},
86 	{"size", OPT_INT, 'S'},
87 	{"addr", OPT_STRING, 'a'},
88 	{"addr6", OPT_STRING, 'A'},
89 	{"port", OPT_INT, 'p'},
90 	{"verbose", OPT_NOPARAM, 'v'},
91 	{"validate", OPT_NOPARAM, 'V'},
92 	{"server", OPT_NOPARAM, 's'},
93 	{"client", OPT_NOPARAM, 'c'},
94 	{"server_inv", OPT_NOPARAM, 'I'},
95 	{"wlat", OPT_NOPARAM, 'l'},
96 	{"rlat", OPT_NOPARAM, 'L'},
97 	{"bw", OPT_NOPARAM, 'B'},
98 	{"duplex", OPT_NOPARAM, 'd'},
99 	{"txdepth", OPT_INT, 'T'},
100 	{"poll", OPT_NOPARAM, 'P'},
101 	{"local_dma_lkey", OPT_NOPARAM, 'Z'},
102 	{"read_inv", OPT_NOPARAM, 'R'},
103 	{"fr", OPT_NOPARAM, 'f'},
104 	{NULL, 0, 0}
105 };
106 
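/*
 * 64-bit byte-order helpers for the rkey/addr/len advertisements.  A
 * big-endian byte swap is its own inverse, so the same conversion is
 * used in both directions.
 */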
107 #define htonll(x) cpu_to_be64((x))
108 #define ntohll(x) cpu_to_be64((x))
109 
110 static DEFINE_MUTEX(krping_mutex);
111 
112 /*
113  * List of running krping threads.
114  */
115 static LIST_HEAD(krping_cbs);
116 
117 /*
118  * Invoke like this, one on each side, using the server's address on
119  * the RDMA device (iw%d):
120  *
121  * /bin/echo server,port=9999,addr=192.168.69.142,validate > /proc/krping
122  * /bin/echo client,port=9999,addr=192.168.69.142,validate > /proc/krping
123  * /bin/echo client,port=9999,addr6=2001:db8:0:f101::1,validate > /proc/krping
124  *
125  * krping "ping/pong" loop:
126  * 	client sends source rkey/addr/len
127  *	server receives source rkey/addr/len
128  *	server rdma reads "ping" data from source
129  * 	server sends "go ahead" on rdma read completion
130  *	client sends sink rkey/addr/len
131  * 	server receives sink rkey/addr/len
132  * 	server rdma writes "pong" data to sink
133  * 	server sends "go ahead" on rdma write completion
134  * 	<repeat loop>
135  */
136 
137 /*
138  * These states are used to signal events between the completion handler
139  * and the main client or server thread.
140  *
141  * Once CONNECTED, they cycle through RDMA_READ_ADV, RDMA_WRITE_ADV,
142  * and RDMA_WRITE_COMPLETE for each ping.
143  */
144 enum test_state {
145 	IDLE = 1,
146 	CONNECT_REQUEST,
147 	ADDR_RESOLVED,
148 	ROUTE_RESOLVED,
149 	CONNECTED,
150 	RDMA_READ_ADV,
151 	RDMA_READ_COMPLETE,
152 	RDMA_WRITE_ADV,
153 	RDMA_WRITE_COMPLETE,
154 	ERROR
155 };
156 
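/*
 * On-the-wire message used to advertise an RDMA buffer to the peer:
 * its DMA address, rkey and length, all in network byte order.
 */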
157 struct krping_rdma_info {
158 	uint64_t buf;
159 	uint32_t rkey;
160 	uint32_t size;
161 };
162 
163 /*
164  * Default max buffer size for IO...
165  */
166 #define RPING_BUFSIZE (128*1024)
167 #define RPING_SQ_DEPTH 64
168 
169 /*
170  * Control block struct.
171  */
172 struct krping_cb {
173 	int server;			/* 0 iff client */
174 	struct ib_cq *cq;
175 	struct ib_pd *pd;
176 	struct ib_qp *qp;
177 
178 	struct ib_mr *dma_mr;
179 
180 	struct ib_fast_reg_page_list *page_list;
181 	int page_list_len;
182 	struct ib_reg_wr reg_mr_wr;
183 	struct ib_send_wr invalidate_wr;
184 	struct ib_mr *reg_mr;
185 	int server_invalidate;
186 	int read_inv;
187 	u8 key;
188 
189 	struct ib_recv_wr rq_wr;	/* recv work request record */
190 	struct ib_sge recv_sgl;		/* recv single SGE */
191 	struct krping_rdma_info recv_buf __aligned(16);	/* malloc'd buffer */
192 	u64 recv_dma_addr;
193 	DECLARE_PCI_UNMAP_ADDR(recv_mapping)
194 
195 	struct ib_send_wr sq_wr;	/* send work request record */
196 	struct ib_sge send_sgl;
197 	struct krping_rdma_info send_buf __aligned(16); /* single send buf */
198 	u64 send_dma_addr;
199 	DECLARE_PCI_UNMAP_ADDR(send_mapping)
200 
201 	struct ib_rdma_wr rdma_sq_wr;	/* rdma work request record */
202 	struct ib_sge rdma_sgl;		/* rdma single SGE */
203 	char *rdma_buf;			/* used as rdma sink */
204 	u64  rdma_dma_addr;
205 	DECLARE_PCI_UNMAP_ADDR(rdma_mapping)
206 	struct ib_mr *rdma_mr;
207 
208 	uint32_t remote_rkey;		/* remote peer's RKEY */
209 	uint64_t remote_addr;		/* remote peer's TO */
210 	uint32_t remote_len;		/* remote peer's LEN */
211 
212 	char *start_buf;		/* rdma read src */
213 	u64  start_dma_addr;
214 	DECLARE_PCI_UNMAP_ADDR(start_mapping)
215 	struct ib_mr *start_mr;
216 
217 	enum test_state state;		/* used for cond/signalling */
218 	wait_queue_head_t sem;
219 	struct krping_stats stats;
220 
221 	uint16_t port;			/* dst port in NBO */
222 	u8 addr[16] __aligned(8);	/* dst addr in NBO */
223 	char *addr_str;			/* dst addr string */
224 	uint8_t addr_type;		/* ADDR_FAMILY - IPv4/V6 */
225 	int verbose;			/* verbose logging */
226 	int count;			/* ping count */
227 	int size;			/* ping data size */
228 	int validate;			/* validate ping data */
229 	int wlat;			/* run wlat test */
230 	int rlat;			/* run rlat test */
231 	int bw;				/* run bw test */
232 	int duplex;			/* run bw full duplex test */
233 	int poll;			/* poll or block for rlat test */
234 	int txdepth;			/* SQ depth */
235 	int local_dma_lkey;		/* use 0 for lkey */
236 	int frtest;			/* reg test */
237 
238 	/* CM stuff */
239 	struct rdma_cm_id *cm_id;	/* connection on client side,*/
240 					/* listener on server side. */
241 	struct rdma_cm_id *child_cm_id;	/* connection on server side */
242 	struct list_head list;
243 };
244 
245 static int krping_cma_event_handler(struct rdma_cm_id *cma_id,
246 				   struct rdma_cm_event *event)
247 {
248 	int ret;
249 	struct krping_cb *cb = cma_id->context;
250 
251 	DEBUG_LOG("cma_event type %d cma_id %p (%s)\n", event->event, cma_id,
252 		  (cma_id == cb->cm_id) ? "parent" : "child");
253 
254 	switch (event->event) {
255 	case RDMA_CM_EVENT_ADDR_RESOLVED:
256 		cb->state = ADDR_RESOLVED;
257 		ret = rdma_resolve_route(cma_id, 2000);
258 		if (ret) {
259 			printk(KERN_ERR PFX "rdma_resolve_route error %d\n",
260 			       ret);
261 			wake_up_interruptible(&cb->sem);
262 		}
263 		break;
264 
265 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
266 		cb->state = ROUTE_RESOLVED;
267 		wake_up_interruptible(&cb->sem);
268 		break;
269 
270 	case RDMA_CM_EVENT_CONNECT_REQUEST:
271 		cb->state = CONNECT_REQUEST;
272 		cb->child_cm_id = cma_id;
273 		DEBUG_LOG("child cma %p\n", cb->child_cm_id);
274 		wake_up_interruptible(&cb->sem);
275 		break;
276 
277 	case RDMA_CM_EVENT_ESTABLISHED:
278 		DEBUG_LOG("ESTABLISHED\n");
279 		if (!cb->server) {
280 			cb->state = CONNECTED;
281 		}
282 		wake_up_interruptible(&cb->sem);
283 		break;
284 
285 	case RDMA_CM_EVENT_ADDR_ERROR:
286 	case RDMA_CM_EVENT_ROUTE_ERROR:
287 	case RDMA_CM_EVENT_CONNECT_ERROR:
288 	case RDMA_CM_EVENT_UNREACHABLE:
289 	case RDMA_CM_EVENT_REJECTED:
290 		printk(KERN_ERR PFX "cma event %d, error %d\n", event->event,
291 		       event->status);
292 		cb->state = ERROR;
293 		wake_up_interruptible(&cb->sem);
294 		break;
295 
296 	case RDMA_CM_EVENT_DISCONNECTED:
297 		printk(KERN_ERR PFX "DISCONNECT EVENT...\n");
298 		cb->state = ERROR;
299 		wake_up_interruptible(&cb->sem);
300 		break;
301 
302 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
303 		printk(KERN_ERR PFX "cma detected device removal!!!!\n");
304 		break;
305 
306 	default:
307 		printk(KERN_ERR PFX "unexpected CM event %d\n", event->event);
308 		wake_up_interruptible(&cb->sem);
309 		break;
310 	}
311 	return 0;
312 }
313 
314 static int server_recv(struct krping_cb *cb, struct ib_wc *wc)
315 {
316 	if (wc->byte_len != sizeof(cb->recv_buf)) {
317 		printk(KERN_ERR PFX "Received bogus data, size %d\n",
318 		       wc->byte_len);
319 		return -1;
320 	}
321 
322 	cb->remote_rkey = ntohl(cb->recv_buf.rkey);
323 	cb->remote_addr = ntohll(cb->recv_buf.buf);
324 	cb->remote_len  = ntohl(cb->recv_buf.size);
325 	DEBUG_LOG("Received rkey %x addr %llx len %d from peer\n",
326 		  cb->remote_rkey, (unsigned long long)cb->remote_addr,
327 		  cb->remote_len);
328 
329 	if (cb->state <= CONNECTED || cb->state == RDMA_WRITE_COMPLETE)
330 		cb->state = RDMA_READ_ADV;
331 	else
332 		cb->state = RDMA_WRITE_ADV;
333 
334 	return 0;
335 }
336 
337 static int client_recv(struct krping_cb *cb, struct ib_wc *wc)
338 {
339 	if (wc->byte_len != sizeof(cb->recv_buf)) {
340 		printk(KERN_ERR PFX "Received bogus data, size %d\n",
341 		       wc->byte_len);
342 		return -1;
343 	}
344 
345 	if (cb->state == RDMA_READ_ADV)
346 		cb->state = RDMA_WRITE_ADV;
347 	else
348 		cb->state = RDMA_WRITE_COMPLETE;
349 
350 	return 0;
351 }
352 
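/*
 * Completion handler: drain the CQ, update the statistics, advance the
 * state machine for send/read/write/recv completions, repost the recv
 * WR, and wake up the thread blocked in the test loop.
 */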
353 static void krping_cq_event_handler(struct ib_cq *cq, void *ctx)
354 {
355 	struct krping_cb *cb = ctx;
356 	struct ib_wc wc;
357 	struct ib_recv_wr *bad_wr;
358 	int ret;
359 
360 	BUG_ON(cb->cq != cq);
361 	if (cb->state == ERROR) {
362 		printk(KERN_ERR PFX "cq completion in ERROR state\n");
363 		return;
364 	}
365 	if (cb->frtest) {
366 		printk(KERN_ERR PFX "cq completion event in frtest!\n");
367 		return;
368 	}
369 	if (!cb->wlat && !cb->rlat && !cb->bw)
370 		ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
371 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 1) {
372 		if (wc.status) {
373 			if (wc.status == IB_WC_WR_FLUSH_ERR) {
374 				DEBUG_LOG("cq flushed\n");
375 				continue;
376 			} else {
377 				printk(KERN_ERR PFX "cq completion failed with "
378 				       "wr_id %jx status %d opcode %d vendor_err %x\n",
379 					(uintmax_t)wc.wr_id, wc.status, wc.opcode, wc.vendor_err);
380 				goto error;
381 			}
382 		}
383 
384 		switch (wc.opcode) {
385 		case IB_WC_SEND:
386 			DEBUG_LOG("send completion\n");
387 			cb->stats.send_bytes += cb->send_sgl.length;
388 			cb->stats.send_msgs++;
389 			break;
390 
391 		case IB_WC_RDMA_WRITE:
392 			DEBUG_LOG("rdma write completion\n");
393 			cb->stats.write_bytes += cb->rdma_sq_wr.wr.sg_list->length;
394 			cb->stats.write_msgs++;
395 			cb->state = RDMA_WRITE_COMPLETE;
396 			wake_up_interruptible(&cb->sem);
397 			break;
398 
399 		case IB_WC_RDMA_READ:
400 			DEBUG_LOG("rdma read completion\n");
401 			cb->stats.read_bytes += cb->rdma_sq_wr.wr.sg_list->length;
402 			cb->stats.read_msgs++;
403 			cb->state = RDMA_READ_COMPLETE;
404 			wake_up_interruptible(&cb->sem);
405 			break;
406 
407 		case IB_WC_RECV:
408 			DEBUG_LOG("recv completion\n");
409 			cb->stats.recv_bytes += sizeof(cb->recv_buf);
410 			cb->stats.recv_msgs++;
411 			if (cb->wlat || cb->rlat || cb->bw)
412 				ret = server_recv(cb, &wc);
413 			else
414 				ret = cb->server ? server_recv(cb, &wc) :
415 						   client_recv(cb, &wc);
416 			if (ret) {
417 				printk(KERN_ERR PFX "recv wc error: %d\n", ret);
418 				goto error;
419 			}
420 
421 			ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
422 			if (ret) {
423 				printk(KERN_ERR PFX "post recv error: %d\n",
424 				       ret);
425 				goto error;
426 			}
427 			wake_up_interruptible(&cb->sem);
428 			break;
429 
430 		default:
431 			printk(KERN_ERR PFX
432 			       "%s:%d Unexpected opcode %d, Shutting down\n",
433 			       __func__, __LINE__, wc.opcode);
434 			goto error;
435 		}
436 	}
437 	if (ret) {
438 		printk(KERN_ERR PFX "poll error %d\n", ret);
439 		goto error;
440 	}
441 	return;
442 error:
443 	cb->state = ERROR;
444 	wake_up_interruptible(&cb->sem);
445 }
446 
447 static int krping_accept(struct krping_cb *cb)
448 {
449 	struct rdma_conn_param conn_param;
450 	int ret;
451 
452 	DEBUG_LOG("accepting client connection request\n");
453 
454 	memset(&conn_param, 0, sizeof conn_param);
455 	conn_param.responder_resources = 1;
456 	conn_param.initiator_depth = 1;
457 
458 	ret = rdma_accept(cb->child_cm_id, &conn_param);
459 	if (ret) {
460 		printk(KERN_ERR PFX "rdma_accept error: %d\n", ret);
461 		return ret;
462 	}
463 
464 	if (!cb->wlat && !cb->rlat && !cb->bw) {
465 		wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
466 		if (cb->state == ERROR) {
467 			printk(KERN_ERR PFX "wait for CONNECTED state %d\n",
468 				cb->state);
469 			return -1;
470 		}
471 	}
472 	return 0;
473 }
474 
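/*
 * Pre-build the static parts of the recv, send, rdma and fastreg work
 * requests so the test loops only have to fill in per-iteration fields.
 */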
475 static void krping_setup_wr(struct krping_cb *cb)
476 {
477 	cb->recv_sgl.addr = cb->recv_dma_addr;
478 	cb->recv_sgl.length = sizeof cb->recv_buf;
479 	cb->recv_sgl.lkey = cb->pd->local_dma_lkey;
480 	cb->rq_wr.sg_list = &cb->recv_sgl;
481 	cb->rq_wr.num_sge = 1;
482 
483 	cb->send_sgl.addr = cb->send_dma_addr;
484 	cb->send_sgl.length = sizeof cb->send_buf;
485 	cb->send_sgl.lkey = cb->pd->local_dma_lkey;
486 
487 	cb->sq_wr.opcode = IB_WR_SEND;
488 	cb->sq_wr.send_flags = IB_SEND_SIGNALED;
489 	cb->sq_wr.sg_list = &cb->send_sgl;
490 	cb->sq_wr.num_sge = 1;
491 
492 	if (cb->server || cb->wlat || cb->rlat || cb->bw) {
493 		cb->rdma_sgl.addr = cb->rdma_dma_addr;
494 		cb->rdma_sq_wr.wr.send_flags = IB_SEND_SIGNALED;
495 		cb->rdma_sq_wr.wr.sg_list = &cb->rdma_sgl;
496 		cb->rdma_sq_wr.wr.num_sge = 1;
497 	}
498 
499 	/*
500 	 * A chain of 2 WRs, LOCAL_INV + REG_MR, both unsignaled.
501 	 * The client uses them to reregister the rdma buffers with a
502 	 * new key each iteration.
503 	 */
504 	cb->reg_mr_wr.wr.opcode = IB_WR_REG_MR;
505 	cb->reg_mr_wr.mr = cb->reg_mr;
506 
507 	cb->invalidate_wr.next = &cb->reg_mr_wr.wr;
508 	cb->invalidate_wr.opcode = IB_WR_LOCAL_INV;
509 }
510 
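/*
 * DMA-map the send/recv advertisement buffers, allocate the coherent
 * rdma (sink) buffer and the fastreg MR, and, for the client and the
 * wlat/rlat/bw tests, the start (source) buffer as well.
 */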
511 static int krping_setup_buffers(struct krping_cb *cb)
512 {
513 	int ret;
514 
515 	DEBUG_LOG(PFX "krping_setup_buffers called on cb %p\n", cb);
516 
517 	cb->recv_dma_addr = ib_dma_map_single(cb->pd->device,
518 				   &cb->recv_buf,
519 				   sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
520 	pci_unmap_addr_set(cb, recv_mapping, cb->recv_dma_addr);
521 	cb->send_dma_addr = ib_dma_map_single(cb->pd->device,
522 					   &cb->send_buf, sizeof(cb->send_buf),
523 					   DMA_BIDIRECTIONAL);
524 	pci_unmap_addr_set(cb, send_mapping, cb->send_dma_addr);
525 
526 	cb->rdma_buf = ib_dma_alloc_coherent(cb->pd->device, cb->size,
527 					     &cb->rdma_dma_addr,
528 					     GFP_KERNEL);
529 	if (!cb->rdma_buf) {
530 		DEBUG_LOG(PFX "rdma_buf allocation failed\n");
531 		ret = -ENOMEM;
532 		goto bail;
533 	}
534 	pci_unmap_addr_set(cb, rdma_mapping, cb->rdma_dma_addr);
535 	cb->page_list_len = (((cb->size - 1) & PAGE_MASK) + PAGE_SIZE)
536 				>> PAGE_SHIFT;
537 	cb->reg_mr = ib_alloc_mr(cb->pd,  IB_MR_TYPE_MEM_REG,
538 				 cb->page_list_len);
539 	if (IS_ERR(cb->reg_mr)) {
540 		ret = PTR_ERR(cb->reg_mr);
541 		DEBUG_LOG(PFX "recv_buf reg_mr failed %d\n", ret);
542 		goto bail;
543 	}
544 	DEBUG_LOG(PFX "reg rkey 0x%x page_list_len %u\n",
545 		cb->reg_mr->rkey, cb->page_list_len);
546 
547 	if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
548 
549 		cb->start_buf = ib_dma_alloc_coherent(cb->pd->device, cb->size,
550 						      &cb->start_dma_addr,
551 						      GFP_KERNEL);
552 		if (!cb->start_buf) {
553 			DEBUG_LOG(PFX "start_buf malloc failed\n");
554 			ret = -ENOMEM;
555 			goto bail;
556 		}
557 		pci_unmap_addr_set(cb, start_mapping, cb->start_dma_addr);
558 	}
559 
560 	krping_setup_wr(cb);
561 	DEBUG_LOG(PFX "allocated & registered buffers...\n");
562 	return 0;
563 bail:
564 	if (cb->reg_mr && !IS_ERR(cb->reg_mr))
565 		ib_dereg_mr(cb->reg_mr);
566 	if (cb->rdma_mr && !IS_ERR(cb->rdma_mr))
567 		ib_dereg_mr(cb->rdma_mr);
568 	if (cb->dma_mr && !IS_ERR(cb->dma_mr))
569 		ib_dereg_mr(cb->dma_mr);
570 	if (cb->rdma_buf) {
571 		ib_dma_free_coherent(cb->pd->device, cb->size, cb->rdma_buf,
572 				     cb->rdma_dma_addr);
573 	}
574 	if (cb->start_buf) {
575 		ib_dma_free_coherent(cb->pd->device, cb->size, cb->start_buf,
576 				     cb->start_dma_addr);
577 	}
578 	return ret;
579 }
580 
581 static void krping_free_buffers(struct krping_cb *cb)
582 {
583 	DEBUG_LOG("krping_free_buffers called on cb %p\n", cb);
584 
585 	if (cb->dma_mr)
586 		ib_dereg_mr(cb->dma_mr);
587 	if (cb->rdma_mr)
588 		ib_dereg_mr(cb->rdma_mr);
589 	if (cb->start_mr)
590 		ib_dereg_mr(cb->start_mr);
591 	if (cb->reg_mr)
592 		ib_dereg_mr(cb->reg_mr);
593 
594 	dma_unmap_single(cb->pd->device->dma_device,
595 			 pci_unmap_addr(cb, recv_mapping),
596 			 sizeof(cb->recv_buf), DMA_BIDIRECTIONAL);
597 	dma_unmap_single(cb->pd->device->dma_device,
598 			 pci_unmap_addr(cb, send_mapping),
599 			 sizeof(cb->send_buf), DMA_BIDIRECTIONAL);
600 
601 	ib_dma_free_coherent(cb->pd->device, cb->size, cb->rdma_buf,
602 			     cb->rdma_dma_addr);
603 
604 	if (cb->start_buf) {
605 		ib_dma_free_coherent(cb->pd->device, cb->size, cb->start_buf,
606 				     cb->start_dma_addr);
607 	}
608 }
609 
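/*
 * Create an RC QP on the connected cm_id (the child id on the server,
 * the primary cm_id on the client), with send and recv sharing the
 * single CQ.
 */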
610 static int krping_create_qp(struct krping_cb *cb)
611 {
612 	struct ib_qp_init_attr init_attr;
613 	int ret;
614 
615 	memset(&init_attr, 0, sizeof(init_attr));
616 	init_attr.cap.max_send_wr = cb->txdepth;
617 	init_attr.cap.max_recv_wr = 2;
618 
619 	/* For flush_qp() */
620 	init_attr.cap.max_send_wr++;
621 	init_attr.cap.max_recv_wr++;
622 
623 	init_attr.cap.max_recv_sge = 1;
624 	init_attr.cap.max_send_sge = 1;
625 	init_attr.qp_type = IB_QPT_RC;
626 	init_attr.send_cq = cb->cq;
627 	init_attr.recv_cq = cb->cq;
628 	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
629 
630 	if (cb->server) {
631 		ret = rdma_create_qp(cb->child_cm_id, cb->pd, &init_attr);
632 		if (!ret)
633 			cb->qp = cb->child_cm_id->qp;
634 	} else {
635 		ret = rdma_create_qp(cb->cm_id, cb->pd, &init_attr);
636 		if (!ret)
637 			cb->qp = cb->cm_id->qp;
638 	}
639 
640 	return ret;
641 }
642 
643 static void krping_free_qp(struct krping_cb *cb)
644 {
645 	ib_destroy_qp(cb->qp);
646 	ib_destroy_cq(cb->cq);
647 	ib_dealloc_pd(cb->pd);
648 }
649 
650 static int krping_setup_qp(struct krping_cb *cb, struct rdma_cm_id *cm_id)
651 {
652 	int ret;
653 	struct ib_cq_init_attr attr = {0};
654 
655 	cb->pd = ib_alloc_pd(cm_id->device, 0);
656 	if (IS_ERR(cb->pd)) {
657 		printk(KERN_ERR PFX "ib_alloc_pd failed\n");
658 		return PTR_ERR(cb->pd);
659 	}
660 	DEBUG_LOG("created pd %p\n", cb->pd);
661 
662 	strlcpy(cb->stats.name, cb->pd->device->name, sizeof(cb->stats.name));
663 
664 	attr.cqe = cb->txdepth * 2;
665 	attr.comp_vector = 0;
666 	cb->cq = ib_create_cq(cm_id->device, krping_cq_event_handler, NULL,
667 			      cb, &attr);
668 	if (IS_ERR(cb->cq)) {
669 		printk(KERN_ERR PFX "ib_create_cq failed\n");
670 		ret = PTR_ERR(cb->cq);
671 		goto err1;
672 	}
673 	DEBUG_LOG("created cq %p\n", cb->cq);
674 
675 	if (!cb->wlat && !cb->rlat && !cb->bw && !cb->frtest) {
676 		ret = ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
677 		if (ret) {
678 			printk(KERN_ERR PFX "ib_req_notify_cq failed\n");
679 			goto err2;
680 		}
681 	}
682 
683 	ret = krping_create_qp(cb);
684 	if (ret) {
685 		printk(KERN_ERR PFX "krping_create_qp failed: %d\n", ret);
686 		goto err2;
687 	}
688 	DEBUG_LOG("created qp %p\n", cb->qp);
689 	return 0;
690 err2:
691 	ib_destroy_cq(cb->cq);
692 err1:
693 	ib_dealloc_pd(cb->pd);
694 	return ret;
695 }
696 
697 /*
698  * return the (possibly rebound) rkey for the rdma buffer.
699  * REG mode: invalidate and rebind via reg wr.
700  * other modes: just return the mr rkey.
701  */
702 static u32 krping_rdma_rkey(struct krping_cb *cb, u64 buf, int post_inv)
703 {
704 	u32 rkey;
705 	struct ib_send_wr *bad_wr;
706 	int ret;
707 	struct scatterlist sg = {0};
708 
709 	cb->invalidate_wr.ex.invalidate_rkey = cb->reg_mr->rkey;
710 
711 	/*
712 	 * Update the reg key.
713 	 */
714 	ib_update_fast_reg_key(cb->reg_mr, ++cb->key);
715 	cb->reg_mr_wr.key = cb->reg_mr->rkey;
716 
717 	/*
718 	 * Update the reg WR with new buf info.
719 	 */
720 	if (buf == (u64)cb->start_dma_addr)
721 		cb->reg_mr_wr.access = IB_ACCESS_REMOTE_READ;
722 	else
723 		cb->reg_mr_wr.access = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
724 	sg_dma_address(&sg) = buf;
725 	sg_dma_len(&sg) = cb->size;
726 
727 	ret = ib_map_mr_sg(cb->reg_mr, &sg, 1, NULL, PAGE_SIZE);
728 	BUG_ON(ret <= 0 || ret > cb->page_list_len);
729 
730 	DEBUG_LOG(PFX "post_inv = %d, reg_mr new rkey 0x%x pgsz %u len %u"
731 		" iova_start %llx\n",
732 		post_inv,
733 		cb->reg_mr_wr.key,
734 		cb->reg_mr->page_size,
735 		cb->reg_mr->length,
736 	        (unsigned long long)cb->reg_mr->iova);
737 
738 	if (post_inv)
739 		ret = ib_post_send(cb->qp, &cb->invalidate_wr, &bad_wr);
740 	else
741 		ret = ib_post_send(cb->qp, &cb->reg_mr_wr.wr, &bad_wr);
742 	if (ret) {
743 		printk(KERN_ERR PFX "post send error %d\n", ret);
744 		cb->state = ERROR;
745 	}
746 	rkey = cb->reg_mr->rkey;
747 	return rkey;
748 }
749 
750 static void krping_format_send(struct krping_cb *cb, u64 buf)
751 {
752 	struct krping_rdma_info *info = &cb->send_buf;
753 	u32 rkey;
754 
755 	/*
756 	 * Client side will do reg or mw bind before
757 	 * advertising the rdma buffer.  Server side
758 	 * sends have no data.
759 	 */
760 	if (!cb->server || cb->wlat || cb->rlat || cb->bw) {
761 		rkey = krping_rdma_rkey(cb, buf, !cb->server_invalidate);
762 		info->buf = htonll(buf);
763 		info->rkey = htonl(rkey);
764 		info->size = htonl(cb->size);
765 		DEBUG_LOG("RDMA addr %llx rkey %x len %d\n",
766 			  (unsigned long long)buf, rkey, cb->size);
767 	}
768 }
769 
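/*
 * Server side of the default ping/pong loop: wait for the client's
 * source advertisement, RDMA READ the ping data, send a go-ahead, wait
 * for the sink advertisement, RDMA WRITE the data back and send the
 * final go-ahead for the next iteration.
 */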
770 static void krping_test_server(struct krping_cb *cb)
771 {
772 	struct ib_send_wr *bad_wr, inv;
773 	int ret;
774 
775 	while (1) {
776 		/* Wait for client's Start STAG/TO/Len */
777 		wait_event_interruptible(cb->sem, cb->state >= RDMA_READ_ADV);
778 		if (cb->state != RDMA_READ_ADV) {
779 			printk(KERN_ERR PFX "wait for RDMA_READ_ADV state %d\n",
780 				cb->state);
781 			break;
782 		}
783 
784 		DEBUG_LOG("server received source adv\n");
785 
786 		cb->rdma_sq_wr.rkey = cb->remote_rkey;
787 		cb->rdma_sq_wr.remote_addr = cb->remote_addr;
788 		cb->rdma_sq_wr.wr.sg_list->length = cb->remote_len;
789 		cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, !cb->read_inv);
790 		cb->rdma_sq_wr.wr.next = NULL;
791 
792 		/* Issue RDMA Read. */
793 		if (cb->read_inv)
794 			cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
795 		else {
796 
797 			cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ;
798 			/*
799 			 * Immediately follow the read with a
800 			 * fenced LOCAL_INV.
801 			 */
802 			cb->rdma_sq_wr.wr.next = &inv;
803 			memset(&inv, 0, sizeof inv);
804 			inv.opcode = IB_WR_LOCAL_INV;
805 			inv.ex.invalidate_rkey = cb->reg_mr->rkey;
806 			inv.send_flags = IB_SEND_FENCE;
807 		}
808 
809 		ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
810 		if (ret) {
811 			printk(KERN_ERR PFX "post send error %d\n", ret);
812 			break;
813 		}
814 		cb->rdma_sq_wr.wr.next = NULL;
815 
816 		DEBUG_LOG("server posted rdma read req\n");
817 
818 		/* Wait for read completion */
819 		wait_event_interruptible(cb->sem,
820 					 cb->state >= RDMA_READ_COMPLETE);
821 		if (cb->state != RDMA_READ_COMPLETE) {
822 			printk(KERN_ERR PFX
823 			       "wait for RDMA_READ_COMPLETE state %d\n",
824 			       cb->state);
825 			break;
826 		}
827 		DEBUG_LOG("server received read complete\n");
828 
829 		/* Display data in recv buf */
830 		if (cb->verbose)
831 			printk(KERN_INFO PFX "server ping data: %s\n",
832 				cb->rdma_buf);
833 
834 		/* Tell client to continue */
835 		if (cb->server && cb->server_invalidate) {
836 			cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
837 			cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
838 			DEBUG_LOG("send-w-inv rkey 0x%x\n", cb->remote_rkey);
839 		}
840 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
841 		if (ret) {
842 			printk(KERN_ERR PFX "post send error %d\n", ret);
843 			break;
844 		}
845 		DEBUG_LOG("server posted go ahead\n");
846 
847 		/* Wait for client's RDMA STAG/TO/Len */
848 		wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
849 		if (cb->state != RDMA_WRITE_ADV) {
850 			printk(KERN_ERR PFX
851 			       "wait for RDMA_WRITE_ADV state %d\n",
852 			       cb->state);
853 			break;
854 		}
855 		DEBUG_LOG("server received sink adv\n");
856 
857 		/* RDMA Write echo data */
858 		cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
859 		cb->rdma_sq_wr.rkey = cb->remote_rkey;
860 		cb->rdma_sq_wr.remote_addr = cb->remote_addr;
861 		cb->rdma_sq_wr.wr.sg_list->length = strlen(cb->rdma_buf) + 1;
862 		if (cb->local_dma_lkey)
863 			cb->rdma_sgl.lkey = cb->pd->local_dma_lkey;
864 		else
865 			cb->rdma_sgl.lkey = krping_rdma_rkey(cb, cb->rdma_dma_addr, 0);
866 
867 		DEBUG_LOG("rdma write from lkey %x laddr %llx len %d\n",
868 			  cb->rdma_sq_wr.wr.sg_list->lkey,
869 			  (unsigned long long)cb->rdma_sq_wr.wr.sg_list->addr,
870 			  cb->rdma_sq_wr.wr.sg_list->length);
871 
872 		ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
873 		if (ret) {
874 			printk(KERN_ERR PFX "post send error %d\n", ret);
875 			break;
876 		}
877 
878 		/* Wait for completion */
879 		ret = wait_event_interruptible(cb->sem, cb->state >=
880 							 RDMA_WRITE_COMPLETE);
881 		if (cb->state != RDMA_WRITE_COMPLETE) {
882 			printk(KERN_ERR PFX
883 			       "wait for RDMA_WRITE_COMPLETE state %d\n",
884 			       cb->state);
885 			break;
886 		}
887 		DEBUG_LOG("server rdma write complete\n");
888 
889 		cb->state = CONNECTED;
890 
891 		/* Tell client to begin again */
892 		if (cb->server && cb->server_invalidate) {
893 			cb->sq_wr.ex.invalidate_rkey = cb->remote_rkey;
894 			cb->sq_wr.opcode = IB_WR_SEND_WITH_INV;
895 			DEBUG_LOG("send-w-inv rkey 0x%x\n", cb->remote_rkey);
896 		}
897 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
898 		if (ret) {
899 			printk(KERN_ERR PFX "post send error %d\n", ret);
900 			break;
901 		}
902 		DEBUG_LOG("server posted go ahead\n");
903 	}
904 }
905 
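/*
 * Read latency loop: post one RDMA READ at a time, poll or sleep until
 * it completes, and report the elapsed time for all iterations.
 */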
906 static void rlat_test(struct krping_cb *cb)
907 {
908 	int scnt;
909 	int iters = cb->count;
910 	struct timeval start_tv, stop_tv;
911 	int ret;
912 	struct ib_wc wc;
913 	struct ib_send_wr *bad_wr;
914 	int ne;
915 
916 	scnt = 0;
917 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_READ;
918 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
919 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
920 	cb->rdma_sq_wr.wr.sg_list->length = cb->size;
921 
922 	microtime(&start_tv);
923 	if (!cb->poll) {
924 		cb->state = RDMA_READ_ADV;
925 		ib_req_notify_cq(cb->cq, IB_CQ_NEXT_COMP);
926 	}
927 	while (scnt < iters) {
928 
929 		cb->state = RDMA_READ_ADV;
930 		ret = ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr);
931 		if (ret) {
932 			printk(KERN_ERR PFX
933 				"Couldn't post send: ret=%d scnt %d\n",
934 				ret, scnt);
935 			return;
936 		}
937 
938 		do {
939 			if (!cb->poll) {
940 				wait_event_interruptible(cb->sem,
941 					cb->state != RDMA_READ_ADV);
942 				if (cb->state == RDMA_READ_COMPLETE) {
943 					ne = 1;
944 					ib_req_notify_cq(cb->cq,
945 						IB_CQ_NEXT_COMP);
946 				} else {
947 					ne = -1;
948 				}
949 			} else
950 				ne = ib_poll_cq(cb->cq, 1, &wc);
951 			if (cb->state == ERROR) {
952 				printk(KERN_ERR PFX
953 					"state == ERROR...bailing scnt %d\n",
954 					scnt);
955 				return;
956 			}
957 		} while (ne == 0);
958 
959 		if (ne < 0) {
960 			printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
961 			return;
962 		}
963 		if (cb->poll && wc.status != IB_WC_SUCCESS) {
964 			printk(KERN_ERR PFX "Completion with error at %s:\n",
965 				cb->server ? "server" : "client");
966 			printk(KERN_ERR PFX "Failed status %d: wr_id %d\n",
967 				wc.status, (int) wc.wr_id);
968 			return;
969 		}
970 		++scnt;
971 	}
972 	microtime(&stop_tv);
973 
974 	if (stop_tv.tv_usec < start_tv.tv_usec) {
975 		stop_tv.tv_usec += 1000000;
976 		stop_tv.tv_sec -= 1;
977 	}
978 
979 	printk(KERN_ERR PFX "delta sec %lu delta usec %lu iter %d size %d\n",
980 		(unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
981 		(unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
982 		scnt, cb->size);
983 }
984 
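/*
 * Write latency ping/pong: spin until the peer's RDMA WRITE of the next
 * sequence byte lands in the local start buffer, then RDMA WRITE the
 * local sequence byte back, recording post/poll cycle counts for the
 * first cycle_iters rounds.
 */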
985 static void wlat_test(struct krping_cb *cb)
986 {
987 	int ccnt, scnt, rcnt;
988 	int iters=cb->count;
989 	volatile char *poll_buf = (char *) cb->start_buf;
990 	char *buf = (char *)cb->rdma_buf;
991 	struct timeval start_tv, stop_tv;
992 	cycles_t *post_cycles_start, *post_cycles_stop;
993 	cycles_t *poll_cycles_start, *poll_cycles_stop;
994 	cycles_t *last_poll_cycles_start;
995 	cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
996 	int i;
997 	int cycle_iters = 1000;
998 
999 	ccnt = 0;
1000 	scnt = 0;
1001 	rcnt = 0;
1002 
1003 	post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1004 	if (!post_cycles_start) {
1005 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1006 		return;
1007 	}
1008 	post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1009 	if (!post_cycles_stop) {
1010 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1011 		return;
1012 	}
1013 	poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1014 	if (!poll_cycles_start) {
1015 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1016 		return;
1017 	}
1018 	poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1019 	if (!poll_cycles_stop) {
1020 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1021 		return;
1022 	}
1023 	last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1024 		GFP_KERNEL);
1025 	if (!last_poll_cycles_start) {
1026 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1027 		return;
1028 	}
1029 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1030 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
1031 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1032 	cb->rdma_sq_wr.wr.sg_list->length = cb->size;
1033 
1034 	if (cycle_iters > iters)
1035 		cycle_iters = iters;
1036 	microtime(&start_tv);
1037 	while (scnt < iters || ccnt < iters || rcnt < iters) {
1038 
1039 		/* Wait till buffer changes. */
1040 		if (rcnt < iters && !(scnt < 1 && !cb->server)) {
1041 			++rcnt;
1042 			while (*poll_buf != (char)rcnt) {
1043 				if (cb->state == ERROR) {
1044 					printk(KERN_ERR PFX
1045 						"state = ERROR, bailing\n");
1046 					return;
1047 				}
1048 			}
1049 		}
1050 
1051 		if (scnt < iters) {
1052 			struct ib_send_wr *bad_wr;
1053 
1054 			*buf = (char)scnt+1;
1055 			if (scnt < cycle_iters)
1056 				post_cycles_start[scnt] = get_cycles();
1057 			if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1058 				printk(KERN_ERR PFX
1059 					"Couldn't post send: scnt=%d\n",
1060 					scnt);
1061 				return;
1062 			}
1063 			if (scnt < cycle_iters)
1064 				post_cycles_stop[scnt] = get_cycles();
1065 			scnt++;
1066 		}
1067 
1068 		if (ccnt < iters) {
1069 			struct ib_wc wc;
1070 			int ne;
1071 
1072 			if (ccnt < cycle_iters)
1073 				poll_cycles_start[ccnt] = get_cycles();
1074 			do {
1075 				if (ccnt < cycle_iters)
1076 					last_poll_cycles_start[ccnt] =
1077 						get_cycles();
1078 				ne = ib_poll_cq(cb->cq, 1, &wc);
1079 			} while (ne == 0);
1080 			if (ccnt < cycle_iters)
1081 				poll_cycles_stop[ccnt] = get_cycles();
1082 			++ccnt;
1083 
1084 			if (ne < 0) {
1085 				printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1086 				return;
1087 			}
1088 			if (wc.status != IB_WC_SUCCESS) {
1089 				printk(KERN_ERR PFX
1090 					"Completion with error at %s:\n",
1091 					cb->server ? "server" : "client");
1092 				printk(KERN_ERR PFX
1093 					"Failed status %d: wr_id %d\n",
1094 					wc.status, (int) wc.wr_id);
1095 				printk(KERN_ERR PFX
1096 					"scnt=%d, rcnt=%d, ccnt=%d\n",
1097 					scnt, rcnt, ccnt);
1098 				return;
1099 			}
1100 		}
1101 	}
1102 	microtime(&stop_tv);
1103 
1104 	if (stop_tv.tv_usec < start_tv.tv_usec) {
1105 		stop_tv.tv_usec += 1000000;
1106 		stop_tv.tv_sec -= 1;
1107 	}
1108 
1109 	for (i=0; i < cycle_iters; i++) {
1110 		sum_post += post_cycles_stop[i] - post_cycles_start[i];
1111 		sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1112 		sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1113 	}
1114 	printk(KERN_ERR PFX
1115 		"delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1116 		" sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1117 		(unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
1118 		(unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
1119 		scnt, cb->size, cycle_iters,
1120 		(unsigned long long)sum_post, (unsigned long long)sum_poll,
1121 		(unsigned long long)sum_last_poll);
1122 	kfree(post_cycles_start);
1123 	kfree(post_cycles_stop);
1124 	kfree(poll_cycles_start);
1125 	kfree(poll_cycles_stop);
1126 	kfree(last_poll_cycles_start);
1127 }
1128 
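/*
 * Bandwidth test: stream RDMA WRITEs of the full buffer, keeping up to
 * txdepth posts outstanding, and report the elapsed time along with
 * cycle counts for the first cycle_iters iterations.
 */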
1129 static void bw_test(struct krping_cb *cb)
1130 {
1131 	int ccnt, scnt, rcnt;
1132 	int iters=cb->count;
1133 	struct timeval start_tv, stop_tv;
1134 	cycles_t *post_cycles_start, *post_cycles_stop;
1135 	cycles_t *poll_cycles_start, *poll_cycles_stop;
1136 	cycles_t *last_poll_cycles_start;
1137 	cycles_t sum_poll = 0, sum_post = 0, sum_last_poll = 0;
1138 	int i;
1139 	int cycle_iters = 1000;
1140 
1141 	ccnt = 0;
1142 	scnt = 0;
1143 	rcnt = 0;
1144 
1145 	post_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1146 	if (!post_cycles_start) {
1147 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1148 		return;
1149 	}
1150 	post_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1151 	if (!post_cycles_stop) {
1152 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1153 		return;
1154 	}
1155 	poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1156 	if (!poll_cycles_start) {
1157 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1158 		return;
1159 	}
1160 	poll_cycles_stop = kmalloc(cycle_iters * sizeof(cycles_t), GFP_KERNEL);
1161 	if (!poll_cycles_stop) {
1162 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1163 		return;
1164 	}
1165 	last_poll_cycles_start = kmalloc(cycle_iters * sizeof(cycles_t),
1166 		GFP_KERNEL);
1167 	if (!last_poll_cycles_start) {
1168 		printk(KERN_ERR PFX "%s kmalloc failed\n", __FUNCTION__);
1169 		return;
1170 	}
1171 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1172 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
1173 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1174 	cb->rdma_sq_wr.wr.sg_list->length = cb->size;
1175 
1176 	if (cycle_iters > iters)
1177 		cycle_iters = iters;
1178 	microtime(&start_tv);
1179 	while (scnt < iters || ccnt < iters) {
1180 
1181 		while (scnt < iters && scnt - ccnt < cb->txdepth) {
1182 			struct ib_send_wr *bad_wr;
1183 
1184 			if (scnt < cycle_iters)
1185 				post_cycles_start[scnt] = get_cycles();
1186 			if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1187 				printk(KERN_ERR PFX
1188 					"Couldn't post send: scnt=%d\n",
1189 					scnt);
1190 				return;
1191 			}
1192 			if (scnt < cycle_iters)
1193 				post_cycles_stop[scnt] = get_cycles();
1194 			++scnt;
1195 		}
1196 
1197 		if (ccnt < iters) {
1198 			int ne;
1199 			struct ib_wc wc;
1200 
1201 			if (ccnt < cycle_iters)
1202 				poll_cycles_start[ccnt] = get_cycles();
1203 			do {
1204 				if (ccnt < cycle_iters)
1205 					last_poll_cycles_start[ccnt] =
1206 						get_cycles();
1207 				ne = ib_poll_cq(cb->cq, 1, &wc);
1208 			} while (ne == 0);
1209 			if (ccnt < cycle_iters)
1210 				poll_cycles_stop[ccnt] = get_cycles();
1211 			ccnt += 1;
1212 
1213 			if (ne < 0) {
1214 				printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1215 				return;
1216 			}
1217 			if (wc.status != IB_WC_SUCCESS) {
1218 				printk(KERN_ERR PFX
1219 					"Completion with error at %s:\n",
1220 					cb->server ? "server" : "client");
1221 				printk(KERN_ERR PFX
1222 					"Failed status %d: wr_id %d\n",
1223 					wc.status, (int) wc.wr_id);
1224 				return;
1225 			}
1226 		}
1227 	}
1228 	microtime(&stop_tv);
1229 
1230 	if (stop_tv.tv_usec < start_tv.tv_usec) {
1231 		stop_tv.tv_usec += 1000000;
1232 		stop_tv.tv_sec -= 1;
1233 	}
1234 
1235 	for (i=0; i < cycle_iters; i++) {
1236 		sum_post += post_cycles_stop[i] - post_cycles_start[i];
1237 		sum_poll += poll_cycles_stop[i] - poll_cycles_start[i];
1238 		sum_last_poll += poll_cycles_stop[i]-last_poll_cycles_start[i];
1239 	}
1240 	printk(KERN_ERR PFX
1241 		"delta sec %lu delta usec %lu iter %d size %d cycle_iters %d"
1242 		" sum_post %llu sum_poll %llu sum_last_poll %llu\n",
1243 		(unsigned long)(stop_tv.tv_sec - start_tv.tv_sec),
1244 		(unsigned long)(stop_tv.tv_usec - start_tv.tv_usec),
1245 		scnt, cb->size, cycle_iters,
1246 		(unsigned long long)sum_post, (unsigned long long)sum_poll,
1247 		(unsigned long long)sum_last_poll);
1248 	kfree(post_cycles_start);
1249 	kfree(post_cycles_stop);
1250 	kfree(poll_cycles_start);
1251 	kfree(poll_cycles_stop);
1252 	kfree(last_poll_cycles_start);
1253 }
1254 
1255 static void krping_rlat_test_server(struct krping_cb *cb)
1256 {
1257 	struct ib_send_wr *bad_wr;
1258 	struct ib_wc wc;
1259 	int ret;
1260 
1261 	/* Spin waiting for client's Start STAG/TO/Len */
1262 	while (cb->state < RDMA_READ_ADV) {
1263 		krping_cq_event_handler(cb->cq, cb);
1264 	}
1265 
1266 	/* Send STAG/TO/Len to client */
1267 	krping_format_send(cb, cb->start_dma_addr);
1268 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1269 	if (ret) {
1270 		printk(KERN_ERR PFX "post send error %d\n", ret);
1271 		return;
1272 	}
1273 
1274 	/* Spin waiting for send completion */
1275 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1276 	if (ret < 0) {
1277 		printk(KERN_ERR PFX "poll error %d\n", ret);
1278 		return;
1279 	}
1280 	if (wc.status) {
1281 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1282 		return;
1283 	}
1284 
1285 	wait_event_interruptible(cb->sem, cb->state == ERROR);
1286 }
1287 
1288 static void krping_wlat_test_server(struct krping_cb *cb)
1289 {
1290 	struct ib_send_wr *bad_wr;
1291 	struct ib_wc wc;
1292 	int ret;
1293 
1294 	/* Spin waiting for client's Start STAG/TO/Len */
1295 	while (cb->state < RDMA_READ_ADV) {
1296 		krping_cq_event_handler(cb->cq, cb);
1297 	}
1298 
1299 	/* Send STAG/TO/Len to client */
1300 	krping_format_send(cb, cb->start_dma_addr);
1301 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1302 	if (ret) {
1303 		printk(KERN_ERR PFX "post send error %d\n", ret);
1304 		return;
1305 	}
1306 
1307 	/* Spin waiting for send completion */
1308 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1309 	if (ret < 0) {
1310 		printk(KERN_ERR PFX "poll error %d\n", ret);
1311 		return;
1312 	}
1313 	if (wc.status) {
1314 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1315 		return;
1316 	}
1317 
1318 	wlat_test(cb);
1319 	wait_event_interruptible(cb->sem, cb->state == ERROR);
1320 }
1321 
1322 static void krping_bw_test_server(struct krping_cb *cb)
1323 {
1324 	struct ib_send_wr *bad_wr;
1325 	struct ib_wc wc;
1326 	int ret;
1327 
1328 	/* Spin waiting for client's Start STAG/TO/Len */
1329 	while (cb->state < RDMA_READ_ADV) {
1330 		krping_cq_event_handler(cb->cq, cb);
1331 	}
1332 
1333 	/* Send STAG/TO/Len to client */
1334 	krping_format_send(cb, cb->start_dma_addr);
1335 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1336 	if (ret) {
1337 		printk(KERN_ERR PFX "post send error %d\n", ret);
1338 		return;
1339 	}
1340 
1341 	/* Spin waiting for send completion */
1342 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1343 	if (ret < 0) {
1344 		printk(KERN_ERR PFX "poll error %d\n", ret);
1345 		return;
1346 	}
1347 	if (wc.status) {
1348 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1349 		return;
1350 	}
1351 
1352 	if (cb->duplex)
1353 		bw_test(cb);
1354 	wait_event_interruptible(cb->sem, cb->state == ERROR);
1355 }
1356 
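/*
 * The fastreg path (REG_MR/LOCAL_INV) is required, so refuse to run on
 * devices that lack IB_DEVICE_MEM_MGT_EXTENSIONS.
 */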
1357 static int reg_supported(struct ib_device *dev)
1358 {
1359 	u64 needed_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
1360 
1361 	if ((dev->attrs.device_cap_flags & needed_flags) != needed_flags) {
1362 		printk(KERN_ERR PFX
1363 			"Fastreg not supported - device_cap_flags 0x%llx\n",
1364 			(unsigned long long)dev->attrs.device_cap_flags);
1365 		return 0;
1366 	}
1367 	DEBUG_LOG("Fastreg supported - device_cap_flags 0x%llx\n",
1368 		(unsigned long long)dev->attrs.device_cap_flags);
1369 	return 1;
1370 }
1371 
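/*
 * Build an IPv4 or IPv6 sockaddr from the parsed address bytes and the
 * port, both already in network byte order.
 */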
1372 static void fill_sockaddr(struct sockaddr_storage *sin, struct krping_cb *cb)
1373 {
1374 	memset(sin, 0, sizeof(*sin));
1375 
1376 	if (cb->addr_type == AF_INET) {
1377 		struct sockaddr_in *sin4 = (struct sockaddr_in *)sin;
1378 		sin4->sin_len = sizeof(*sin4);
1379 		sin4->sin_family = AF_INET;
1380 		memcpy((void *)&sin4->sin_addr.s_addr, cb->addr, 4);
1381 		sin4->sin_port = cb->port;
1382 	} else if (cb->addr_type == AF_INET6) {
1383 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sin;
1384 		sin6->sin6_len = sizeof(*sin6);
1385 		sin6->sin6_family = AF_INET6;
1386 		memcpy((void *)&sin6->sin6_addr, cb->addr, 16);
1387 		sin6->sin6_port = cb->port;
1388 	}
1389 }
1390 
1391 static int krping_bind_server(struct krping_cb *cb)
1392 {
1393 	struct sockaddr_storage sin;
1394 	int ret;
1395 
1396 
1397 	fill_sockaddr(&sin, cb);
1398 
1399 	ret = rdma_bind_addr(cb->cm_id, (struct sockaddr *)&sin);
1400 	if (ret) {
1401 		printk(KERN_ERR PFX "rdma_bind_addr error %d\n", ret);
1402 		return ret;
1403 	}
1404 	DEBUG_LOG("rdma_bind_addr successful\n");
1405 
1406 	DEBUG_LOG("rdma_listen\n");
1407 	ret = rdma_listen(cb->cm_id, 3);
1408 	if (ret) {
1409 		printk(KERN_ERR PFX "rdma_listen failed: %d\n", ret);
1410 		return ret;
1411 	}
1412 
1413 	wait_event_interruptible(cb->sem, cb->state >= CONNECT_REQUEST);
1414 	if (cb->state != CONNECT_REQUEST) {
1415 		printk(KERN_ERR PFX "wait for CONNECT_REQUEST state %d\n",
1416 			cb->state);
1417 		return -1;
1418 	}
1419 
1420 	if (!reg_supported(cb->child_cm_id->device))
1421 		return -EINVAL;
1422 
1423 	return 0;
1424 }
1425 
1426 static void krping_run_server(struct krping_cb *cb)
1427 {
1428 	struct ib_recv_wr *bad_wr;
1429 	int ret;
1430 
1431 	ret = krping_bind_server(cb);
1432 	if (ret)
1433 		return;
1434 
1435 	ret = krping_setup_qp(cb, cb->child_cm_id);
1436 	if (ret) {
1437 		printk(KERN_ERR PFX "setup_qp failed: %d\n", ret);
1438 		goto err0;
1439 	}
1440 
1441 	ret = krping_setup_buffers(cb);
1442 	if (ret) {
1443 		printk(KERN_ERR PFX "krping_setup_buffers failed: %d\n", ret);
1444 		goto err1;
1445 	}
1446 
1447 	ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
1448 	if (ret) {
1449 		printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
1450 		goto err2;
1451 	}
1452 
1453 	ret = krping_accept(cb);
1454 	if (ret) {
1455 		printk(KERN_ERR PFX "connect error %d\n", ret);
1456 		goto err2;
1457 	}
1458 
1459 	if (cb->wlat)
1460 		krping_wlat_test_server(cb);
1461 	else if (cb->rlat)
1462 		krping_rlat_test_server(cb);
1463 	else if (cb->bw)
1464 		krping_bw_test_server(cb);
1465 	else
1466 		krping_test_server(cb);
1467 	rdma_disconnect(cb->child_cm_id);
1468 err2:
1469 	krping_free_buffers(cb);
1470 err1:
1471 	krping_free_qp(cb);
1472 err0:
1473 	rdma_destroy_id(cb->child_cm_id);
1474 }
1475 
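/*
 * Client side of the default ping/pong loop: fill start_buf with ASCII
 * ping data, advertise it as the server's read source, then advertise
 * rdma_buf as the write sink, and optionally validate the echoed data.
 */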
1476 static void krping_test_client(struct krping_cb *cb)
1477 {
1478 	int ping, start, cc, i, ret;
1479 	struct ib_send_wr *bad_wr;
1480 	unsigned char c;
1481 
1482 	start = 65;
1483 	for (ping = 0; !cb->count || ping < cb->count; ping++) {
1484 		cb->state = RDMA_READ_ADV;
1485 
1486 		/* Put some ascii text in the buffer. */
1487 		cc = sprintf(cb->start_buf, "rdma-ping-%d: ", ping);
1488 		for (i = cc, c = start; i < cb->size; i++) {
1489 			cb->start_buf[i] = c;
1490 			c++;
1491 			if (c > 122)
1492 				c = 65;
1493 		}
1494 		start++;
1495 		if (start > 122)
1496 			start = 65;
1497 		cb->start_buf[cb->size - 1] = 0;
1498 
1499 		krping_format_send(cb, cb->start_dma_addr);
1500 		if (cb->state == ERROR) {
1501 			printk(KERN_ERR PFX "krping_format_send failed\n");
1502 			break;
1503 		}
1504 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1505 		if (ret) {
1506 			printk(KERN_ERR PFX "post send error %d\n", ret);
1507 			break;
1508 		}
1509 
1510 		/* Wait for server to ACK */
1511 		wait_event_interruptible(cb->sem, cb->state >= RDMA_WRITE_ADV);
1512 		if (cb->state != RDMA_WRITE_ADV) {
1513 			printk(KERN_ERR PFX
1514 			       "wait for RDMA_WRITE_ADV state %d\n",
1515 			       cb->state);
1516 			break;
1517 		}
1518 
1519 		krping_format_send(cb, cb->rdma_dma_addr);
1520 		ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1521 		if (ret) {
1522 			printk(KERN_ERR PFX "post send error %d\n", ret);
1523 			break;
1524 		}
1525 
1526 		/* Wait for the server to say the RDMA Write is complete. */
1527 		wait_event_interruptible(cb->sem,
1528 					 cb->state >= RDMA_WRITE_COMPLETE);
1529 		if (cb->state != RDMA_WRITE_COMPLETE) {
1530 			printk(KERN_ERR PFX
1531 			       "wait for RDMA_WRITE_COMPLETE state %d\n",
1532 			       cb->state);
1533 			break;
1534 		}
1535 
1536 		if (cb->validate)
1537 			if (memcmp(cb->start_buf, cb->rdma_buf, cb->size)) {
1538 				printk(KERN_ERR PFX "data mismatch!\n");
1539 				break;
1540 			}
1541 
1542 		if (cb->verbose)
1543 			printk(KERN_INFO PFX "ping data: %s\n", cb->rdma_buf);
1544 #ifdef SLOW_KRPING
1545 		wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
1546 #endif
1547 	}
1548 }
1549 
1550 static void krping_rlat_test_client(struct krping_cb *cb)
1551 {
1552 	struct ib_send_wr *bad_wr;
1553 	struct ib_wc wc;
1554 	int ret;
1555 
1556 	cb->state = RDMA_READ_ADV;
1557 
1558 	/* Send STAG/TO/Len to server */
1559 	krping_format_send(cb, cb->start_dma_addr);
1560 	if (cb->state == ERROR) {
1561 		printk(KERN_ERR PFX "krping_format_send failed\n");
1562 		return;
1563 	}
1564 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1565 	if (ret) {
1566 		printk(KERN_ERR PFX "post send error %d\n", ret);
1567 		return;
1568 	}
1569 
1570 	/* Spin waiting for send completion */
1571 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1572 	if (ret < 0) {
1573 		printk(KERN_ERR PFX "poll error %d\n", ret);
1574 		return;
1575 	}
1576 	if (wc.status) {
1577 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1578 		return;
1579 	}
1580 
1581 	/* Spin waiting for server's Start STAG/TO/Len */
1582 	while (cb->state < RDMA_WRITE_ADV) {
1583 		krping_cq_event_handler(cb->cq, cb);
1584 	}
1585 
1586 #if 0
1587 {
1588 	int i;
1589 	struct timeval start, stop;
1590 	time_t sec;
1591 	suseconds_t usec;
1592 	unsigned long long elapsed;
1593 	struct ib_wc wc;
1594 	struct ib_send_wr *bad_wr;
1595 	int ne;
1596 
1597 	cb->rdma_sq_wr.wr.opcode = IB_WR_RDMA_WRITE;
1598 	cb->rdma_sq_wr.rkey = cb->remote_rkey;
1599 	cb->rdma_sq_wr.remote_addr = cb->remote_addr;
1600 	cb->rdma_sq_wr.wr.sg_list->length = 0;
1601 	cb->rdma_sq_wr.wr.num_sge = 0;
1602 
1603 	microtime(&start);
1604 	for (i=0; i < 100000; i++) {
1605 		if (ib_post_send(cb->qp, &cb->rdma_sq_wr.wr, &bad_wr)) {
1606 			printk(KERN_ERR PFX  "Couldn't post send\n");
1607 			return;
1608 		}
1609 		do {
1610 			ne = ib_poll_cq(cb->cq, 1, &wc);
1611 		} while (ne == 0);
1612 		if (ne < 0) {
1613 			printk(KERN_ERR PFX "poll CQ failed %d\n", ne);
1614 			return;
1615 		}
1616 		if (wc.status != IB_WC_SUCCESS) {
1617 			printk(KERN_ERR PFX "Completion with error at %s:\n",
1618 				cb->server ? "server" : "client");
1619 			printk(KERN_ERR PFX "Failed status %d: wr_id %d\n",
1620 				wc.status, (int) wc.wr_id);
1621 			return;
1622 		}
1623 	}
1624 	microtime(&stop);
1625 
1626 	if (stop.tv_usec < start.tv_usec) {
1627 		stop.tv_usec += 1000000;
1628 		stop.tv_sec  -= 1;
1629 	}
1630 	sec     = stop.tv_sec - start.tv_sec;
1631 	usec    = stop.tv_usec - start.tv_usec;
1632 	elapsed = sec * 1000000 + usec;
1633 	printk(KERN_ERR PFX "0B-write-lat iters 100000 usec %llu\n", elapsed);
1634 }
1635 #endif
1636 
1637 	rlat_test(cb);
1638 }
1639 
1640 static void krping_wlat_test_client(struct krping_cb *cb)
1641 {
1642 	struct ib_send_wr *bad_wr;
1643 	struct ib_wc wc;
1644 	int ret;
1645 
1646 	cb->state = RDMA_READ_ADV;
1647 
1648 	/* Send STAG/TO/Len to server */
1649 	krping_format_send(cb, cb->start_dma_addr);
1650 	if (cb->state == ERROR) {
1651 		printk(KERN_ERR PFX "krping_format_send failed\n");
1652 		return;
1653 	}
1654 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1655 	if (ret) {
1656 		printk(KERN_ERR PFX "post send error %d\n", ret);
1657 		return;
1658 	}
1659 
1660 	/* Spin waiting for send completion */
1661 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1662 	if (ret < 0) {
1663 		printk(KERN_ERR PFX "poll error %d\n", ret);
1664 		return;
1665 	}
1666 	if (wc.status) {
1667 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1668 		return;
1669 	}
1670 
1671 	/* Spin waiting for server's Start STAG/TO/Len */
1672 	while (cb->state < RDMA_WRITE_ADV) {
1673 		krping_cq_event_handler(cb->cq, cb);
1674 	}
1675 
1676 	wlat_test(cb);
1677 }
1678 
1679 static void krping_bw_test_client(struct krping_cb *cb)
1680 {
1681 	struct ib_send_wr *bad_wr;
1682 	struct ib_wc wc;
1683 	int ret;
1684 
1685 	cb->state = RDMA_READ_ADV;
1686 
1687 	/* Send STAG/TO/Len to server */
1688 	krping_format_send(cb, cb->start_dma_addr);
1689 	if (cb->state == ERROR) {
1690 		printk(KERN_ERR PFX "krping_format_send failed\n");
1691 		return;
1692 	}
1693 	ret = ib_post_send(cb->qp, &cb->sq_wr, &bad_wr);
1694 	if (ret) {
1695 		printk(KERN_ERR PFX "post send error %d\n", ret);
1696 		return;
1697 	}
1698 
1699 	/* Spin waiting for send completion */
1700 	while ((ret = ib_poll_cq(cb->cq, 1, &wc)) == 0);
1701 	if (ret < 0) {
1702 		printk(KERN_ERR PFX "poll error %d\n", ret);
1703 		return;
1704 	}
1705 	if (wc.status) {
1706 		printk(KERN_ERR PFX "send completion error %d\n", wc.status);
1707 		return;
1708 	}
1709 
1710 	/* Spin waiting for server's Start STAG/TO/Len */
1711 	while (cb->state < RDMA_WRITE_ADV) {
1712 		krping_cq_event_handler(cb->cq, cb);
1713 	}
1714 
1715 	bw_test(cb);
1716 }
1717 
1718 /*
1719  * Manual qp flush test
1720  */
1721 static void flush_qp(struct krping_cb *cb)
1722 {
1723 	struct ib_send_wr wr = { 0 }, *bad;
1724 	struct ib_recv_wr recv_wr = { 0 }, *recv_bad;
1725 	struct ib_wc wc;
1726 	int ret;
1727 	int flushed = 0;
1728 	int ccnt = 0;
1729 
1730 	rdma_disconnect(cb->cm_id);
1731 	DEBUG_LOG("disconnected!\n");
1732 
1733 	wr.opcode = IB_WR_SEND;
1734 	wr.wr_id = 0xdeadbeefcafebabe;
1735 	ret = ib_post_send(cb->qp, &wr, &bad);
1736 	if (ret) {
1737 		printk(KERN_ERR PFX "%s post_send failed ret %d\n", __func__, ret);
1738 		return;
1739 	}
1740 
1741 	recv_wr.wr_id = 0xcafebabedeadbeef;
1742 	ret = ib_post_recv(cb->qp, &recv_wr, &recv_bad);
1743 	if (ret) {
1744 		printk(KERN_ERR PFX "%s post_recv failed ret %d\n", __func__, ret);
1745 		return;
1746 	}
1747 
1748 	/* poll until the flush WRs complete */
1749 	do {
1750 		ret = ib_poll_cq(cb->cq, 1, &wc);
1751 		if (ret < 0) {
1752 			printk(KERN_ERR PFX "ib_poll_cq failed %d\n", ret);
1753 			return;
1754 		}
1755 		if (ret == 0)
1756 			continue;
1757 		ccnt++;
1758 		if (wc.wr_id == 0xdeadbeefcafebabe ||
1759 		    wc.wr_id == 0xcafebabedeadbeef)
1760 			flushed++;
1761 	} while (flushed != 2);
1762 	DEBUG_LOG("qp_flushed! ccnt %u\n", ccnt);
1763 }
1764 
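/*
 * Fastreg stress test: repeatedly post REG_MR + signaled LOCAL_INV
 * chains with random lengths, keeping the send queue roughly half
 * full, until the requested count is reached or a signal arrives.
 */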
1765 static void krping_fr_test(struct krping_cb *cb)
1766 {
1767 	struct ib_send_wr inv, *bad;
1768 	struct ib_reg_wr fr;
1769 	struct ib_wc wc;
1770 	u8 key = 0;
1771 	struct ib_mr *mr;
1772 	int ret;
1773 	int size = cb->size;
1774 	int plen = (((size - 1) & PAGE_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
1775 	unsigned long start;
1776 	int count = 0;
1777 	int scnt = 0;
1778 	struct scatterlist sg = {0};
1779 
1780 	mr = ib_alloc_mr(cb->pd, IB_MR_TYPE_MEM_REG, plen);
1781 	if (IS_ERR(mr)) {
1782 		printk(KERN_ERR PFX "ib_alloc_mr failed %ld\n", PTR_ERR(mr));
1783 		return;
1784 	}
1785 
1786 	sg_dma_address(&sg) = (dma_addr_t)0xcafebabe0000ULL;
1787 	sg_dma_len(&sg) = size;
1788 	ret = ib_map_mr_sg(mr, &sg, 1, NULL, PAGE_SIZE);
1789 	if (ret <= 0) {
1790 		printk(KERN_ERR PFX "ib_map_mr_sg err %d\n", ret);
1791 		goto err2;
1792 	}
1793 
1794 	memset(&fr, 0, sizeof fr);
1795 	fr.wr.opcode = IB_WR_REG_MR;
1796 	fr.access = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE;
1797 	fr.mr = mr;
1798 	fr.wr.next = &inv;
1799 
1800 	memset(&inv, 0, sizeof inv);
1801 	inv.opcode = IB_WR_LOCAL_INV;
1802 	inv.send_flags = IB_SEND_SIGNALED;
1803 
1804 	DEBUG_LOG("fr_test: stag index 0x%x plen %u size %u depth %u\n", mr->rkey >> 8, plen, cb->size, cb->txdepth);
1805 	start = time_uptime;
1806 	while (!cb->count || count <= cb->count) {
1807 		if (SIGPENDING(curthread)) {
1808 			printk(KERN_ERR PFX "signal!\n");
1809 			break;
1810 		}
1811 		if ((time_uptime - start) >= 9) {
1812 			DEBUG_LOG("fr_test: pausing 1 second! count %u latest size %u plen %u\n", count, size, plen);
1813 			wait_event_interruptible_timeout(cb->sem, cb->state == ERROR, HZ);
1814 			if (cb->state == ERROR)
1815 				break;
1816 			start = time_uptime;
1817 		}
1818 		while (scnt < (cb->txdepth>>1)) {
1819 			ib_update_fast_reg_key(mr, ++key);
1820 			fr.key = mr->rkey;
1821 			inv.ex.invalidate_rkey = mr->rkey;
1822 
1823 			size = arc4random() % cb->size;
1824 			if (size == 0)
1825 				size = cb->size;
1826 			sg_dma_len(&sg) = size;
1827 			ret = ib_map_mr_sg(mr, &sg, 1, NULL, PAGE_SIZE);
1828 			if (ret <= 0) {
1829 				printk(KERN_ERR PFX "ib_map_mr_sg err %d\n", ret);
1830 				goto err2;
1831 			}
1832 			ret = ib_post_send(cb->qp, &fr.wr, &bad);
1833 			if (ret) {
1834 				printk(KERN_ERR PFX "ib_post_send failed %d\n", ret);
1835 				goto err2;
1836 			}
1837 			scnt++;
1838 		}
1839 
1840 		ret = ib_poll_cq(cb->cq, 1, &wc);
1841 		if (ret < 0) {
1842 			printk(KERN_ERR PFX "ib_poll_cq failed %d\n", ret);
1843 			goto err2;
1844 		}
1845 		if (ret == 1) {
1846 			if (wc.status) {
1847 				printk(KERN_ERR PFX "completion error %u\n", wc.status);
1848 				goto err2;
1849 			}
1850 			count++;
1851 			scnt--;
1852 		}
1853 	}
1854 err2:
1855 	flush_qp(cb);
1856 	DEBUG_LOG("fr_test: done!\n");
1857 	ib_dereg_mr(mr);
1858 }
1859 
1860 static int krping_connect_client(struct krping_cb *cb)
1861 {
1862 	struct rdma_conn_param conn_param;
1863 	int ret;
1864 
1865 	memset(&conn_param, 0, sizeof conn_param);
1866 	conn_param.responder_resources = 1;
1867 	conn_param.initiator_depth = 1;
1868 	conn_param.retry_count = 10;
1869 
1870 	ret = rdma_connect(cb->cm_id, &conn_param);
1871 	if (ret) {
1872 		printk(KERN_ERR PFX "rdma_connect error %d\n", ret);
1873 		return ret;
1874 	}
1875 
1876 	wait_event_interruptible(cb->sem, cb->state >= CONNECTED);
1877 	if (cb->state == ERROR) {
1878 		printk(KERN_ERR PFX "wait for CONNECTED state %d\n", cb->state);
1879 		return -1;
1880 	}
1881 
1882 	DEBUG_LOG("rdma_connect successful\n");
1883 	return 0;
1884 }
1885 
1886 static int krping_bind_client(struct krping_cb *cb)
1887 {
1888 	struct sockaddr_storage sin;
1889 	int ret;
1890 
1891 	fill_sockaddr(&sin, cb);
1892 
1893 	ret = rdma_resolve_addr(cb->cm_id, NULL, (struct sockaddr *)&sin, 2000);
1894 	if (ret) {
1895 		printk(KERN_ERR PFX "rdma_resolve_addr error %d\n", ret);
1896 		return ret;
1897 	}
1898 
1899 	wait_event_interruptible(cb->sem, cb->state >= ROUTE_RESOLVED);
1900 	if (cb->state != ROUTE_RESOLVED) {
1901 		printk(KERN_ERR PFX
1902 		       "addr/route resolution failed: state %d\n",
1903 		       cb->state);
1904 		return -EINTR;
1905 	}
1906 
1907 	if (!reg_supported(cb->cm_id->device))
1908 		return -EINVAL;
1909 
1910 	DEBUG_LOG("rdma_resolve_addr - rdma_resolve_route successful\n");
1911 	return 0;
1912 }
1913 
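/*
 * Client main path: resolve the peer, set up the queue pair and buffers,
 * pre-post a receive, connect, and then run whichever test was selected on
 * the command line.  The error labels unwind the setup in reverse order.
 */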
1914 static void krping_run_client(struct krping_cb *cb)
1915 {
1916 	struct ib_recv_wr *bad_wr;
1917 	int ret;
1918 
1919 	ret = krping_bind_client(cb);
1920 	if (ret)
1921 		return;
1922 
1923 	ret = krping_setup_qp(cb, cb->cm_id);
1924 	if (ret) {
1925 		printk(KERN_ERR PFX "setup_qp failed: %d\n", ret);
1926 		return;
1927 	}
1928 
1929 	ret = krping_setup_buffers(cb);
1930 	if (ret) {
1931 		printk(KERN_ERR PFX "krping_setup_buffers failed: %d\n", ret);
1932 		goto err1;
1933 	}
1934 
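	/*
	 * Post a receive before connecting so the server's first message
	 * cannot arrive while no receive buffer is available.
	 */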
1935 	ret = ib_post_recv(cb->qp, &cb->rq_wr, &bad_wr);
1936 	if (ret) {
1937 		printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
1938 		goto err2;
1939 	}
1940 
1941 	ret = krping_connect_client(cb);
1942 	if (ret) {
1943 		printk(KERN_ERR PFX "connect error %d\n", ret);
1944 		goto err2;
1945 	}
1946 
1947 	if (cb->wlat)
1948 		krping_wlat_test_client(cb);
1949 	else if (cb->rlat)
1950 		krping_rlat_test_client(cb);
1951 	else if (cb->bw)
1952 		krping_bw_test_client(cb);
1953 	else if (cb->frtest)
1954 		krping_fr_test(cb);
1955 	else
1956 		krping_test_client(cb);
1957 	rdma_disconnect(cb->cm_id);
1958 err2:
1959 	krping_free_buffers(cb);
1960 err1:
1961 	krping_free_qp(cb);
1962 }
1963 
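/*
 * Map an interface name (the "%scope" suffix of a link-local IPv6 address)
 * to its interface index, or return 0 if the name is missing or unknown.
 */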
1964 static uint16_t
1965 krping_get_ipv6_scope_id(char *name)
1966 {
1967 	struct ifnet *ifp;
1968 	uint16_t retval;
1969 
1970 	if (name == NULL)
1971 		return (0);
1972 	CURVNET_SET_QUIET(TD_TO_VNET(curthread));
1973 	ifp = ifunit_ref(name);
1974 	CURVNET_RESTORE();
1975 	if (ifp == NULL)
1976 		return (0);
1977 	retval = ifp->if_index;
1978 	if_rele(ifp);
1979 	return (retval);
1980 }
1981 
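/*
 * Parse a krping command string, validate the option combination, create
 * the RDMA CM id and then run as either client or server.  The control
 * block stays on the global krping_cbs list for the whole run so its
 * statistics can be reported while the test is in progress.
 *
 * Typical command strings look like the following (a sketch, assuming the
 * usual /dev/krping control node and krping_getopt's comma-separated
 * key[=value] syntax):
 *
 *	server,port=9999,verbose
 *	client,addr=192.168.1.10,port=9999,count=100,size=65536,validate
 */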
1982 int krping_doit(char *cmd)
1983 {
1984 	struct krping_cb *cb;
1985 	int op;
1986 	int ret = 0;
1987 	char *optarg;
1988 	char *scope;
1989 	unsigned long optint;
1990 
1991 	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1992 	if (!cb)
1993 		return -ENOMEM;
1994 
1995 	mutex_lock(&krping_mutex);
1996 	list_add_tail(&cb->list, &krping_cbs);
1997 	mutex_unlock(&krping_mutex);
1998 
1999 	cb->server = -1;
2000 	cb->state = IDLE;
2001 	cb->size = 64;
2002 	cb->txdepth = RPING_SQ_DEPTH;
2003 	init_waitqueue_head(&cb->sem);
2004 
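	/*
	 * Walk the comma-separated option list.  Each recognized option
	 * either updates the control block or flags a usage error; errors
	 * are acted on only after the whole string has been parsed.
	 */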
2005 	while ((op = krping_getopt("krping", &cmd, krping_opts, NULL, &optarg,
2006 			      &optint)) != 0) {
2007 		switch (op) {
2008 		case 'a':
2009 			cb->addr_str = optarg;
2010 			cb->addr_type = AF_INET;
2011 			DEBUG_LOG("ipaddr (%s)\n", optarg);
2012 			if (inet_pton(AF_INET, optarg, cb->addr) != 1) {
2013 				printk(KERN_ERR PFX "bad addr string %s\n",
2014 				    optarg);
2015 				ret = EINVAL;
2016 			}
2017 			break;
2018 		case 'A':
2019 			cb->addr_str = optarg;
2020 			cb->addr_type = AF_INET6;
2021 			DEBUG_LOG("ipv6addr (%s)\n", optarg);
2022 			scope = strstr(optarg, "%");
2023 			/* extract scope ID, if any */
2024 			if (scope != NULL)
2025 				*scope++ = 0;
2026 			/* extract IPv6 network address */
2027 			if (inet_pton(AF_INET6, optarg, cb->addr) != 1) {
2028 				printk(KERN_ERR PFX "bad addr string %s\n",
2029 				    optarg);
2030 				ret = EINVAL;
2031 			} else if (IN6_IS_SCOPE_LINKLOCAL((struct in6_addr *)cb->addr) ||
2032 			    IN6_IS_ADDR_MC_INTFACELOCAL((struct in6_addr *)cb->addr)) {
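				/*
				 * KAME-style embedded scope: for link-local
				 * and interface-local addresses the zone ID
				 * is carried in bytes 2 and 3 of the
				 * address.
				 */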
2033 				uint16_t scope_id = krping_get_ipv6_scope_id(scope);
2034 				DEBUG_LOG("ipv6 scope ID = %d\n", scope_id);
2035 				cb->addr[2] = scope_id >> 8;
2036 				cb->addr[3] = scope_id & 0xFF;
2037 			}
2038 			break;
2039 		case 'p':
2040 			cb->port = htons(optint);
2041 			DEBUG_LOG("port %d\n", (int)optint);
2042 			break;
2043 		case 'P':
2044 			cb->poll = 1;
2045 			DEBUG_LOG("poll\n");
2046 			break;
2047 		case 's':
2048 			cb->server = 1;
2049 			DEBUG_LOG("server\n");
2050 			break;
2051 		case 'c':
2052 			cb->server = 0;
2053 			DEBUG_LOG("client\n");
2054 			break;
2055 		case 'S':
2056 			cb->size = optint;
2057 			if ((cb->size < 1) ||
2058 			    (cb->size > RPING_BUFSIZE)) {
2059 				printk(KERN_ERR PFX "Invalid size %d "
2060 				       "(valid range is 1 to %d)\n",
2061 				       cb->size, RPING_BUFSIZE);
2062 				ret = EINVAL;
2063 			} else
2064 				DEBUG_LOG("size %d\n", (int)optint);
2065 			break;
2066 		case 'C':
2067 			cb->count = optint;
2068 			if (cb->count < 0) {
2069 				printk(KERN_ERR PFX "Invalid count %d\n",
2070 					cb->count);
2071 				ret = EINVAL;
2072 			} else
2073 				DEBUG_LOG("count %d\n", (int) cb->count);
2074 			break;
2075 		case 'v':
2076 			cb->verbose++;
2077 			DEBUG_LOG("verbose\n");
2078 			break;
2079 		case 'V':
2080 			cb->validate++;
2081 			DEBUG_LOG("validate data\n");
2082 			break;
2083 		case 'l':
2084 			cb->wlat++;
2085 			break;
2086 		case 'L':
2087 			cb->rlat++;
2088 			break;
2089 		case 'B':
2090 			cb->bw++;
2091 			break;
2092 		case 'd':
2093 			cb->duplex++;
2094 			break;
2095 		case 'I':
2096 			cb->server_invalidate = 1;
2097 			break;
2098 		case 'T':
2099 			cb->txdepth = optint;
2100 			DEBUG_LOG("txdepth %d\n", (int) cb->txdepth);
2101 			break;
2102 		case 'Z':
2103 			cb->local_dma_lkey = 1;
2104 			DEBUG_LOG("using local dma lkey\n");
2105 			break;
2106 		case 'R':
2107 			cb->read_inv = 1;
2108 			DEBUG_LOG("using read-with-inv\n");
2109 			break;
2110 		case 'f':
2111 			cb->frtest = 1;
2112 			DEBUG_LOG("fast-reg test!\n");
2113 			break;
2114 		default:
2115 			printk(KERN_ERR PFX "unknown opt %s\n", optarg);
2116 			ret = -EINVAL;
2117 			break;
2118 		}
2119 	}
2120 	if (ret)
2121 		goto out;
2122 
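	/*
	 * Cross-option sanity checks: a role must have been chosen, the
	 * fastreg test is client-only, at most one special test mode may be
	 * selected, and the wlat/rlat/bw modes are refused outright because
	 * they relied on the removed MR memory mode.
	 */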
2123 	if (cb->server == -1) {
2124 		printk(KERN_ERR PFX "must be either client or server\n");
2125 		ret = -EINVAL;
2126 		goto out;
2127 	}
2128 
2129 	if (cb->server && cb->frtest) {
2130 		printk(KERN_ERR PFX "must be client to run frtest\n");
2131 		ret = -EINVAL;
2132 		goto out;
2133 	}
2134 
2135 	if ((cb->frtest + cb->bw + cb->rlat + cb->wlat) > 1) {
2136 		printk(KERN_ERR PFX "Pick only one test: fr, bw, rlat, wlat\n");
2137 		ret = -EINVAL;
2138 		goto out;
2139 	}
2140 
2141 	if (cb->wlat || cb->rlat || cb->bw) {
2142 		printk(KERN_ERR PFX "wlat, rlat, and bw tests only support mem_mode MR - which is no longer supported\n");
2143 		ret = -EINVAL;
2144 		goto out;
2145 	}
2146 
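	/*
	 * All CM events for this id are dispatched to
	 * krping_cma_event_handler() with cb as the private context.
	 */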
2147 	cb->cm_id = rdma_create_id(&init_net, krping_cma_event_handler, cb, RDMA_PS_TCP, IB_QPT_RC);
2148 	if (IS_ERR(cb->cm_id)) {
2149 		ret = PTR_ERR(cb->cm_id);
2150 		printk(KERN_ERR PFX "rdma_create_id error %d\n", ret);
2151 		goto out;
2152 	}
2153 	DEBUG_LOG("created cm_id %p\n", cb->cm_id);
2154 
2155 	if (cb->server)
2156 		krping_run_server(cb);
2157 	else
2158 		krping_run_client(cb);
2159 
2160 	DEBUG_LOG("destroy cm_id %p\n", cb->cm_id);
2161 	rdma_destroy_id(cb->cm_id);
2162 out:
2163 	mutex_lock(&krping_mutex);
2164 	list_del(&cb->list);
2165 	mutex_unlock(&krping_mutex);
2166 	kfree(cb);
2167 	return ret;
2168 }
2169 
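/*
 * Hand each active control block's statistics to the supplied callback,
 * all under krping_mutex so entries cannot be added or removed mid-walk.
 * A NULL stats pointer tells the callback that the connection has not yet
 * finished its setup (no PD allocated).
 *
 * A minimal callback sketch (hypothetical; real consumers live outside
 * this file):
 *
 *	static void
 *	krping_count_ready(struct krping_stats *stats, void *arg)
 *	{
 *		int *nready = arg;
 *
 *		if (stats != NULL)
 *			(*nready)++;
 *	}
 *
 *	...
 *	int nready = 0;
 *	krping_walk_cb_list(krping_count_ready, &nready);
 */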
2170 void
2171 krping_walk_cb_list(void (*f)(struct krping_stats *, void *), void *arg)
2172 {
2173 	struct krping_cb *cb;
2174 
2175 	mutex_lock(&krping_mutex);
2176 	list_for_each_entry(cb, &krping_cbs, list)
2177 	    (*f)(cb->pd ? &cb->stats : NULL, arg);
2178 	mutex_unlock(&krping_mutex);
2179 }
2180