xref: /linux/include/trace/events/rpcrdma.h (revision fd7d598270724cc787982ea48bbe17ad383a8b7f)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 
18 #include <trace/misc/rdma.h>
19 #include <trace/misc/sunrpc.h>
20 
21 /**
22  ** Event classes
23  **/
24 
/*
 * Generic WC completion class: records the CQ/completion ID pair,
 * the WC status, and the device's vendor error code.
 */
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* vendor_err is captured only for failed completions */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
64 
/*
 * Send completion class: records only the CQ/completion ID pair
 * (no status fields — used on paths where success is implied).
 */
DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
	),

	TP_printk("cq.id=%u cid=%d",
		__entry->cq_id, __entry->completion_id
	)
);

#define DEFINE_SEND_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_send_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
95 
/*
 * Send flush class: like the completion class, but vendor_err is
 * recorded unconditionally (callers fire this only on failure).
 */
DECLARE_EVENT_CLASS(rpcrdma_send_flush_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		__entry->vendor_err = wc->vendor_err;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_SEND_FLUSH_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_send_flush_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
132 
/*
 * MR completion class: identical record layout to
 * rpcrdma_completion_class, but the completion ID is labeled
 * "mr.id=" in the output because it identifies a memory region.
 */
DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* vendor_err is captured only for failed completions */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_MR_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
172 
/*
 * Receive completion class: additionally records the number of
 * bytes received (wc->byte_len) on success; zero on failure.
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* byte_len is valid only on success; vendor_err only on failure */
		if (wc->status) {
			__entry->received = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->received = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err,
		__entry->received
	)
);

#define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
217 
/*
 * Successful-receive class: records the CQ/completion ID pair and
 * the received byte count; no status fields (success is implied).
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_success_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, received)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->received = wc->byte_len;
	),

	TP_printk("cq.id=%u cid=%d received=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->received
	)
);

#define DEFINE_RECEIVE_SUCCESS_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_success_class, name,	\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
251 
/*
 * Receive flush class: records status and vendor_err
 * unconditionally (callers fire this only on failure).
 */
DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		__entry->vendor_err = wc->vendor_err;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

#define DEFINE_RECEIVE_FLUSH_EVENT(name)				\
		DEFINE_EVENT(rpcrdma_receive_flush_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
288 
/*
 * Reply-handling class: decodes the XID, RPC/RDMA version, and
 * procedure from a received reply, plus the peer's address/port.
 * Events are named xprtrdma_reply_<name>_err by DEFINE_REPLY_EVENT.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),

	TP_fast_assign(
		/* on-the-wire fields are big-endian */
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);

#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))
325 
/*
 * Minimal transport class: records only the peer's presentation
 * address and port strings.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s",
		__get_str(addr), __get_str(port)
	)
);

#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
354 
/*
 * Connection state-change class: records the caller's return code
 * and the endpoint's current re_connect_status.
 * Events are named xprtrdma_<name> by DEFINE_CONN_EVENT.
 */
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s rc=%d connection status=%d",
		__get_str(addr), __get_str(port),
		__entry->rc, __entry->connect_status
	)
);

#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))
390 
/*
 * Read chunk class: records one MR's contribution to a Read list
 * (position, length, offset, handle) for a given RPC task.
 * Events are named xprtrdma_chunk_<name> by DEFINE_RDCH_EVENT.
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	/* "more" when further segments remain beyond this MR, else "last" */
	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
441 
/*
 * Write/Reply chunk class: like xprtrdma_rdch_event but without
 * the XDR position field (Write lists and Reply chunks carry none).
 * Events are named xprtrdma_chunk_<name> by DEFINE_WRCH_EVENT.
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	/* "more" when further segments remain beyond this MR, else "last" */
	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
488 
/*
 * Export the DMA direction enum values to userspace tooling and
 * provide a symbolic printer for them.
 */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
500 
/*
 * MR state class: snapshots an MR's identity, mapping, and DMA
 * direction. The owning RPC task is recorded when the MR is bound
 * to a request; otherwise task_id/client_id are 0/-1 sentinels.
 * Events are named xprtrdma_mr_<name> by DEFINE_MR_EVENT.
 */
DECLARE_EVENT_CLASS(xprtrdma_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		const struct rpcrdma_req *req = mr->mr_req;

		/* mr_req may be NULL for an MR not currently in use */
		if (req) {
			const struct rpc_task *task = req->rl_slot.rq_task;

			__entry->task_id = task->tk_pid;
			__entry->client_id = task->tk_client->cl_clid;
		} else {
			__entry->task_id = 0;
			__entry->client_id = -1;
		}
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr_class,				\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
555 
/*
 * "Anonymous" MR class: same MR snapshot as xprtrdma_mr_class but
 * without any owning-task fields, for contexts where no rpc_task
 * is associated with the MR.
 */
DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

#define DEFINE_ANON_MR_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
595 
/*
 * Backchannel callback class: records the request XID and the
 * peer's address/port. Events are named xprtrdma_cb_<name>.
 */
DECLARE_EVENT_CLASS(xprtrdma_callback_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpc_rqst *rqst
	),

	TP_ARGS(r_xprt, rqst),

	TP_STRUCT__entry(
		__field(u32, xid)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x",
		__get_str(addr), __get_str(port), __entry->xid
	)
);

#define DEFINE_CALLBACK_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_callback_class,			\
				xprtrdma_cb_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(r_xprt, rqst))
629 
630 /**
631  ** Connection events
632  **/
633 
/*
 * Records the negotiated and calculated inline send/receive
 * thresholds for a newly set-up endpoint, along with the raw
 * source and destination socket addresses from the CM ID.
 */
TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		/* sized for the larger IPv6 form; holds either family */
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);
669 
/* Connection state transitions and injected-disconnect trigger */
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
674 
/*
 * Records a connect attempt being scheduled, including the delay
 * (in jiffies) before the worker runs.
 */
TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s delay=%lu",
		__get_str(addr), __get_str(port), __entry->delay
	)
);
699 
700 
/*
 * Records new connect/reconnect timeout values. Stored in jiffies;
 * printed in seconds (divided by HZ).
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port),
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);
729 
730 /**
731  ** Call events
732  **/
733 
/*
 * Records a batch of newly-created MRs for a transport.
 */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s created %u MRs",
		__get_str(addr), __get_str(port), __entry->count
	)
);
758 
/*
 * Fires when a request cannot obtain the MRs it needs; identifies
 * the stalled RPC task and the peer.
 */
TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),

	TP_ARGS(r_xprt, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s",
		__entry->task_id, __entry->client_id,
		__get_str(addr), __get_str(port)
	)
);
788 
/* Per-chunk-type marshaling events */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
DEFINE_WRCH_EVENT(wp);

/* Export chunk-type enum values and provide a symbolic printer */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
811 
/*
 * Records the outcome of marshaling an RPC call: the transport
 * header length, the send buffer's head/page/tail lengths, and the
 * chosen Read/Write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
		  " xid=0x%08x hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
856 
/*
 * Fires when marshaling an RPC call fails; records the task, XID,
 * and the error return code.
 */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
883 
/*
 * Fires when preparing the Send WR fails; same record layout as
 * xprtrdma_marshal_failed.
 */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
910 
/*
 * Records a Send WR being posted: the send context's CQ/completion
 * ID, the SGE count, and whether the WR requests a signaled
 * completion.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		/* tk_client can be NULL (e.g. backchannel); use -1 sentinel */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);
947 
/*
 * Fires when posting a Send WR fails; records the task and the
 * ib_post_send() return code.
 */
TRACE_EVENT(xprtrdma_post_send_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req,
		int rc
	),

	TP_ARGS(r_xprt, req, rc),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, rc)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_ep *ep = r_xprt->rx_ep;

		/* NOTE(review): id comes from recv_cq on a send-side error
		 * path — confirm this is the intended CQ */
		__entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
		__entry->task_id = rqst->rq_task->tk_pid;
		/* tk_client can be NULL; use -1 sentinel */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->rc = rc;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u rc=%d",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->rc
	)
);
980 
981 TRACE_EVENT(xprtrdma_post_recv,
982 	TP_PROTO(
983 		const struct rpcrdma_rep *rep
984 	),
985 
986 	TP_ARGS(rep),
987 
988 	TP_STRUCT__entry(
989 		__field(u32, cq_id)
990 		__field(int, completion_id)
991 	),
992 
993 	TP_fast_assign(
994 		__entry->cq_id = rep->rr_cid.ci_queue_id;
995 		__entry->completion_id = rep->rr_cid.ci_completion_id;
996 	),
997 
998 	TP_printk("cq.id=%d cid=%d",
999 		__entry->cq_id, __entry->completion_id
1000 	)
1001 );
1002 
1003 TRACE_EVENT(xprtrdma_post_recvs,
1004 	TP_PROTO(
1005 		const struct rpcrdma_xprt *r_xprt,
1006 		unsigned int count
1007 	),
1008 
1009 	TP_ARGS(r_xprt, count),
1010 
1011 	TP_STRUCT__entry(
1012 		__field(u32, cq_id)
1013 		__field(unsigned int, count)
1014 		__field(int, posted)
1015 		__string(addr, rpcrdma_addrstr(r_xprt))
1016 		__string(port, rpcrdma_portstr(r_xprt))
1017 	),
1018 
1019 	TP_fast_assign(
1020 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
1021 
1022 		__entry->cq_id = ep->re_attr.recv_cq->res.id;
1023 		__entry->count = count;
1024 		__entry->posted = ep->re_receive_count;
1025 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
1026 		__assign_str(port, rpcrdma_portstr(r_xprt));
1027 	),
1028 
1029 	TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active",
1030 		__get_str(addr), __get_str(port), __entry->cq_id,
1031 		__entry->count, __entry->posted
1032 	)
1033 );
1034 
1035 TRACE_EVENT(xprtrdma_post_recvs_err,
1036 	TP_PROTO(
1037 		const struct rpcrdma_xprt *r_xprt,
1038 		int status
1039 	),
1040 
1041 	TP_ARGS(r_xprt, status),
1042 
1043 	TP_STRUCT__entry(
1044 		__field(u32, cq_id)
1045 		__field(int, status)
1046 		__string(addr, rpcrdma_addrstr(r_xprt))
1047 		__string(port, rpcrdma_portstr(r_xprt))
1048 	),
1049 
1050 	TP_fast_assign(
1051 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
1052 
1053 		__entry->cq_id = ep->re_attr.recv_cq->res.id;
1054 		__entry->status = status;
1055 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
1056 		__assign_str(port, rpcrdma_portstr(r_xprt));
1057 	),
1058 
1059 	TP_printk("peer=[%s]:%s cq.id=%d rc=%d",
1060 		__get_str(addr), __get_str(port), __entry->cq_id,
1061 		__entry->status
1062 	)
1063 );
1064 
/*
 * Fires when posting a chain of LocalInv WRs fails; records the
 * owning task and the error status.
 */
TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);
1091 
1092 /**
1093  ** Completion events
1094  **/
1095 
1096 DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);
1097 
1098 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
1099 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
1100 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
1101 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
1102 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
1103 
/*
 * Records the result of allocating an FRWR MR.
 */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);
1126 
/*
 * Records an FRWR MR deregistration: full MR snapshot plus the
 * ib_dereg_mr() return code.
 */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->mr_ibmr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);
1162 
/*
 * Fires when building an MR's scatterlist fails; records the first
 * segment's DMA address, the DMA direction, and the sg_nents value
 * that was rejected.
 */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);
1191 
/* Fired when mapping MR @mr fails; @num_mapped is how many of the
 * mr_nents sges were successfully mapped before the failure.
 */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);
1222 
/* MR state-transition events, from the MR event classes defined
 * earlier in this file.
 */
DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);

DEFINE_ANON_MR_EVENT(unmap);
1229 
1230 TRACE_EVENT(xprtrdma_dma_maperr,
1231 	TP_PROTO(
1232 		u64 addr
1233 	),
1234 
1235 	TP_ARGS(addr),
1236 
1237 	TP_STRUCT__entry(
1238 		__field(u64, addr)
1239 	),
1240 
1241 	TP_fast_assign(
1242 		__entry->addr = addr;
1243 	),
1244 
1245 	TP_printk("dma addr=0x%llx\n", __entry->addr)
1246 );
1247 
1248 /**
1249  ** Reply events
1250  **/
1251 
/* Records receipt of an RPC/RDMA reply matched to @task: the XID
 * (host byte order) and the credit grant carried in the reply.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		unsigned int credits
	),

	TP_ARGS(task, rep, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x credits=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits
	)
);
1280 
/* Reply-processing events from the reply event class defined earlier. */
DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);
1285 
/* Fired when the peer reports an RPC/RDMA version mismatch for
 * @rqst; @min and @max point at the peer's supported version range
 * (on-the-wire big-endian values).
 */
TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);
1316 
/* Fired when the peer reports a chunk-related error for @rqst. */
TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);
1340 
1341 TRACE_EVENT(xprtrdma_err_unrecognized,
1342 	TP_PROTO(
1343 		const struct rpc_rqst *rqst,
1344 		__be32 *procedure
1345 	),
1346 
1347 	TP_ARGS(rqst, procedure),
1348 
1349 	TP_STRUCT__entry(
1350 		__field(unsigned int, task_id)
1351 		__field(unsigned int, client_id)
1352 		__field(u32, xid)
1353 		__field(u32, procedure)
1354 	),
1355 
1356 	TP_fast_assign(
1357 		__entry->task_id = rqst->rq_task->tk_pid;
1358 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1359 		__entry->procedure = be32_to_cpup(procedure);
1360 	),
1361 
1362 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x procedure=%u",
1363 		__entry->task_id, __entry->client_id, __entry->xid,
1364 		__entry->procedure
1365 	)
1366 );
1367 
/* Records post-receive buffer fixup for @rqst: @fixup is the byte
 * count moved, alongside the receive xdr_buf's head/page/tail sizes.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);
1399 
/* Records one decoded RDMA segment as a handle/length/offset triplet. */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	/* Output format: length@offset:handle */
	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1426 
/* Fired when the MRs associated with @task are zapped. */
TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
		__entry->task_id, __entry->client_id
	)
);
1448 
1449 /**
1450  ** Callback events
1451  **/
1452 
/* Records backchannel setup on @r_xprt with @reqs preallocated
 * callback requests; captures the peer's address and port strings.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s %u reqs",
		__get_str(addr), __get_str(port), __entry->reqs
	)
);
1477 
/* Backchannel call/reply events from the callback event class. */
DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);
1480 
1481 /**
1482  ** Server-side RPC/RDMA events
1483  **/
1484 
/* Event class for failures while accepting a server-side RDMA
 * connection on @rdma; @status is the failing step's return value.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);
1507 
/* Instantiates svcrdma_<name>_err from svcrdma_accept_class. */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
1521 
/* Export the RPC/RDMA procedure values to userspace trace tooling,
 * and map them to symbolic names for TP_printk output.
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1535 
/* Records a successfully decoded RPC/RDMA transport header: @p walks
 * the header's xid, vers, credits, and proc words in wire order;
 * @hdrlen is the total decoded header length in bytes.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1570 
/* Fired when a received message is too short to contain a complete
 * RPC/RDMA transport header; @hdrlen is the bytes actually received.
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
1595 
/* Event class for malformed incoming requests; @p walks the
 * transport header's xid, vers, credits, and proc words in wire order.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	/* proc is printed numerically here since it may be unrecognized */
	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
1626 
/* Instantiates svcrdma_decode_<name>_err from svcrdma_badreq_event. */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1640 
/* Records one Write segment being encoded into a reply: segment
 * number @segno and the handle/length/offset triplet.
 */
TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1676 
/* Records one decoded Read segment: the chunk's position and the
 * segment's handle/length/offset triplet.
 */
TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		/* NOTE(review): "segno" is filled from ch_segcount, the
		 * running count of segments in the chunk at decode time —
		 * confirm this is the intended segment ordinal.
		 */
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1712 
/* Records decoded Write segment @segno of @chunk as a
 * handle/length/offset triplet.
 */
TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		/* Look up the segment in the chunk's segment array */
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1749 
/* Event class for RPC/RDMA error replies sent by the server,
 * keyed only by the request's XID (converted to host byte order).
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1769 
/* Instantiates svcrdma_err_<name> from svcrdma_error_event. */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1779 
1780 /**
1781  ** Server-side RDMA API events
1782  **/
1783 
/* Event class for server-side DMA map/unmap activity: records the
 * DMA address and length along with device and peer address strings.
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);
1812 
/* Instantiates svcrdma_<name> from svcrdma_dma_map_class. */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1825 
/* Fired when DMA-mapping an RDMA read/write scatterlist of @nents
 * entries fails with @status.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);
1854 
/* Fired when no R/W context could be obtained for an operation
 * needing @num_sges scatter/gather elements.
 */
TRACE_EVENT(svcrdma_no_rwctx_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int num_sges
	),

	TP_ARGS(rdma, num_sges),

	TP_STRUCT__entry(
		__field(unsigned int, num_sges)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->num_sges = num_sges;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s num_sges=%d",
		__get_str(addr), __get_str(device), __entry->num_sges
	)
);
1879 
/* Fired when handling @rqst would run past the available pages;
 * @pageno is the offending page index.
 */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);
1907 
/* Fired when a Write chunk is too small for the payload: @remaining
 * bytes were left unwritten after segment @seg_no of @num_segs.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
1939 
1940 TRACE_EVENT(svcrdma_send_pullup,
1941 	TP_PROTO(
1942 		const struct svc_rdma_send_ctxt *ctxt,
1943 		unsigned int msglen
1944 	),
1945 
1946 	TP_ARGS(ctxt, msglen),
1947 
1948 	TP_STRUCT__entry(
1949 		__field(u32, cq_id)
1950 		__field(int, completion_id)
1951 		__field(unsigned int, hdrlen)
1952 		__field(unsigned int, msglen)
1953 	),
1954 
1955 	TP_fast_assign(
1956 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1957 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1958 		__entry->hdrlen = ctxt->sc_hdrbuf.len,
1959 		__entry->msglen = msglen;
1960 	),
1961 
1962 	TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
1963 		__entry->cq_id, __entry->completion_id,
1964 		__entry->hdrlen, __entry->msglen,
1965 		__entry->hdrlen + __entry->msglen)
1966 );
1967 
/* Fired when sending the reply for @rqst fails with @status. */
TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);
1992 
/* Records posting a Send WR: sge count and, for Send-with-Invalidate,
 * the rkey being invalidated (0 for a plain Send).
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		/* rkey is meaningful only for IB_WR_SEND_WITH_INV */
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);
2022 
/* Send completion events: success, flush, and error variants. */
DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);
2026 
2027 TRACE_EVENT(svcrdma_post_recv,
2028 	TP_PROTO(
2029 		const struct svc_rdma_recv_ctxt *ctxt
2030 	),
2031 
2032 	TP_ARGS(ctxt),
2033 
2034 	TP_STRUCT__entry(
2035 		__field(u32, cq_id)
2036 		__field(int, completion_id)
2037 	),
2038 
2039 	TP_fast_assign(
2040 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
2041 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
2042 	),
2043 
2044 	TP_printk("cq.id=%d cid=%d",
2045 		__entry->cq_id, __entry->completion_id
2046 	)
2047 );
2048 
/* Receive completion events: success, flush, and error variants. */
DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err);
2052 
/* Fired when posting to the Receive queue fails with @status. */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
2075 
/* Event class for posting a chunk's work requests; @sqecount is the
 * number of Send Queue entries consumed.
 */
DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);
2101 
/* Instantiates svcrdma_post_<name>_chunk from svcrdma_post_chunk_class. */
#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);
2114 
/* Fired when a chunk context is released; reuses the post_chunk class. */
DEFINE_EVENT(svcrdma_post_chunk_class, svcrdma_cc_release,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),
	TP_ARGS(cid, sqecount)
);
2122 
/* Fired on successful RDMA Read completion: total payload bytes and
 * the microsecond latency since the Read was posted (@posttime).
 */
TRACE_EVENT(svcrdma_wc_read,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid,
		unsigned int totalbytes,
		const ktime_t posttime
	),

	TP_ARGS(wc, cid, totalbytes, posttime),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(s64, read_latency)
		__field(unsigned int, totalbytes)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->totalbytes = totalbytes;
		/* Elapsed microseconds since the Read was posted */
		__entry->read_latency = ktime_us_delta(ktime_get(), posttime);
	),

	TP_printk("cq.id=%u cid=%d totalbytes=%u latency-us=%lld",
		__entry->cq_id, __entry->completion_id,
		__entry->totalbytes, __entry->read_latency
	)
);
2152 
/* RDMA Read/Write completion flush and error events. */
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);

DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);
2159 
/* Fired on an asynchronous QP error event; formats the peer's
 * socket address into a fixed-size buffer.
 */
TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		/* NOTE(review): snprintf already NUL-terminates within the
		 * given size; the "- 1" looks redundant — confirm intent.
		 */
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
2186 
/* Event class for Send Queue accounting: snapshots available SQ
 * entries against the queue depth.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
2210 
/* Instantiates svcrdma_sq_<name> from svcrdma_sendqueue_event. */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
2220 
/* Fired when posting to the Send Queue fails with @status; also
 * snapshots SQ availability at the time of failure.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
2248 
2249 #endif /* _TRACE_RPCRDMA_H */
2250 
2251 #include <trace/define_trace.h>
2252