xref: /linux/include/trace/events/rpcrdma.h (revision a4fd8414659bf470e2146b352574bbd274e54b7a)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 #include <trace/events/rdma.h>
18 
19 /**
20  ** Event classes
21  **/
22 
/*
 * Completion event class: records an ib_wc completion together with
 * its rpc_rdma_cid (CQ ID plus per-CQ completion ID) so a completion
 * can be correlated with the queue it arrived on.
 */
23 DECLARE_EVENT_CLASS(rpcrdma_completion_class,
24 	TP_PROTO(
25 		const struct ib_wc *wc,
26 		const struct rpc_rdma_cid *cid
27 	),
28 
29 	TP_ARGS(wc, cid),
30 
31 	TP_STRUCT__entry(
32 		__field(u32, cq_id)
33 		__field(int, completion_id)
34 		__field(unsigned long, status)
35 		__field(unsigned int, vendor_err)
36 	),
37 
38 	TP_fast_assign(
39 		__entry->cq_id = cid->ci_queue_id;
40 		__entry->completion_id = cid->ci_completion_id;
41 		__entry->status = wc->status;
		/* vendor_err is recorded only for failed completions */
42 		if (wc->status)
43 			__entry->vendor_err = wc->vendor_err;
44 		else
45 			__entry->vendor_err = 0;
46 	),
47 
48 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
49 		__entry->cq_id, __entry->completion_id,
50 		rdma_show_wc_status(__entry->status),
51 		__entry->status, __entry->vendor_err
52 	)
53 );
54 
/* Emit a DEFINE_EVENT named @name based on rpcrdma_completion_class. */
55 #define DEFINE_COMPLETION_EVENT(name)					\
56 		DEFINE_EVENT(rpcrdma_completion_class, name,		\
57 				TP_PROTO(				\
58 					const struct ib_wc *wc,		\
59 					const struct rpc_rdma_cid *cid	\
60 				),					\
61 				TP_ARGS(wc, cid))
62 
/*
 * Reply event class: snapshots the identity of an incoming RPC/RDMA
 * reply — the rep, its owning transport, and the XID, transport
 * header version, and procedure (converted to host byte order).
 */
63 DECLARE_EVENT_CLASS(xprtrdma_reply_event,
64 	TP_PROTO(
65 		const struct rpcrdma_rep *rep
66 	),
67 
68 	TP_ARGS(rep),
69 
70 	TP_STRUCT__entry(
71 		__field(const void *, rep)
72 		__field(const void *, r_xprt)
73 		__field(u32, xid)
74 		__field(u32, version)
75 		__field(u32, proc)
76 	),
77 
78 	TP_fast_assign(
79 		__entry->rep = rep;
80 		__entry->r_xprt = rep->rr_rxprt;
81 		__entry->xid = be32_to_cpu(rep->rr_xid);
82 		__entry->version = be32_to_cpu(rep->rr_vers);
83 		__entry->proc = be32_to_cpu(rep->rr_proc);
84 	),
85 
86 	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
87 		__entry->r_xprt, __entry->xid, __entry->rep,
88 		__entry->version, __entry->proc
89 	)
90 );
91 
/* Emit a DEFINE_EVENT named @name based on xprtrdma_reply_event. */
92 #define DEFINE_REPLY_EVENT(name)					\
93 		DEFINE_EVENT(xprtrdma_reply_event, name,		\
94 				TP_PROTO(				\
95 					const struct rpcrdma_rep *rep	\
96 				),					\
97 				TP_ARGS(rep))
98 
/*
 * Transport-only event class: records the r_xprt pointer plus its
 * presentation address and port strings for identification.
 */
99 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
100 	TP_PROTO(
101 		const struct rpcrdma_xprt *r_xprt
102 	),
103 
104 	TP_ARGS(r_xprt),
105 
106 	TP_STRUCT__entry(
107 		__field(const void *, r_xprt)
108 		__string(addr, rpcrdma_addrstr(r_xprt))
109 		__string(port, rpcrdma_portstr(r_xprt))
110 	),
111 
112 	TP_fast_assign(
113 		__entry->r_xprt = r_xprt;
114 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
115 		__assign_str(port, rpcrdma_portstr(r_xprt));
116 	),
117 
118 	TP_printk("peer=[%s]:%s r_xprt=%p",
119 		__get_str(addr), __get_str(port), __entry->r_xprt
120 	)
121 );
122 
/* Emit a DEFINE_EVENT named @name based on xprtrdma_rxprt. */
123 #define DEFINE_RXPRT_EVENT(name)					\
124 		DEFINE_EVENT(xprtrdma_rxprt, name,			\
125 				TP_PROTO(				\
126 					const struct rpcrdma_xprt *r_xprt \
127 				),					\
128 				TP_ARGS(r_xprt))
129 
/*
 * Connection event class: records the caller-supplied return code
 * together with the endpoint's re_connect_status as sampled at
 * trace time, plus the peer's address and port strings.
 */
130 DECLARE_EVENT_CLASS(xprtrdma_connect_class,
131 	TP_PROTO(
132 		const struct rpcrdma_xprt *r_xprt,
133 		int rc
134 	),
135 
136 	TP_ARGS(r_xprt, rc),
137 
138 	TP_STRUCT__entry(
139 		__field(const void *, r_xprt)
140 		__field(int, rc)
141 		__field(int, connect_status)
142 		__string(addr, rpcrdma_addrstr(r_xprt))
143 		__string(port, rpcrdma_portstr(r_xprt))
144 	),
145 
146 	TP_fast_assign(
147 		__entry->r_xprt = r_xprt;
148 		__entry->rc = rc;
149 		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
150 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
151 		__assign_str(port, rpcrdma_portstr(r_xprt));
152 	),
153 
154 	TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
155 		__get_str(addr), __get_str(port), __entry->r_xprt,
156 		__entry->rc, __entry->connect_status
157 	)
158 );
159 
/* Define event xprtrdma_<name> based on xprtrdma_connect_class. */
160 #define DEFINE_CONN_EVENT(name)						\
161 		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
162 				TP_PROTO(				\
163 					const struct rpcrdma_xprt *r_xprt, \
164 					int rc				\
165 				),					\
166 				TP_ARGS(r_xprt, rc))
167 
/*
 * Read-chunk event class: records one MR registered for a Read chunk —
 * the owning task, the chunk's XDR position, and the MR's
 * handle/length/offset. The "more"/"last" tag in the output reflects
 * whether further segments remain (nents < nsegs).
 */
168 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
169 	TP_PROTO(
170 		const struct rpc_task *task,
171 		unsigned int pos,
172 		struct rpcrdma_mr *mr,
173 		int nsegs
174 	),
175 
176 	TP_ARGS(task, pos, mr, nsegs),
177 
178 	TP_STRUCT__entry(
179 		__field(unsigned int, task_id)
180 		__field(unsigned int, client_id)
181 		__field(unsigned int, pos)
182 		__field(int, nents)
183 		__field(u32, handle)
184 		__field(u32, length)
185 		__field(u64, offset)
186 		__field(int, nsegs)
187 	),
188 
189 	TP_fast_assign(
190 		__entry->task_id = task->tk_pid;
191 		__entry->client_id = task->tk_client->cl_clid;
192 		__entry->pos = pos;
193 		__entry->nents = mr->mr_nents;
194 		__entry->handle = mr->mr_handle;
195 		__entry->length = mr->mr_length;
196 		__entry->offset = mr->mr_offset;
197 		__entry->nsegs = nsegs;
198 	),
199 
200 	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
201 		__entry->task_id, __entry->client_id,
202 		__entry->pos, __entry->length,
203 		(unsigned long long)__entry->offset, __entry->handle,
204 		__entry->nents < __entry->nsegs ? "more" : "last"
205 	)
206 );
207 
/* Define event xprtrdma_chunk_<name> based on xprtrdma_rdch_event. */
208 #define DEFINE_RDCH_EVENT(name)						\
209 		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
210 				TP_PROTO(				\
211 					const struct rpc_task *task,	\
212 					unsigned int pos,		\
213 					struct rpcrdma_mr *mr,		\
214 					int nsegs			\
215 				),					\
216 				TP_ARGS(task, pos, mr, nsegs))
217 
/*
 * Write/Reply-chunk event class: like xprtrdma_rdch_event but with no
 * XDR position field. Records the owning task and the MR's
 * handle/length/offset; "more"/"last" reflects nents < nsegs.
 */
218 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
219 	TP_PROTO(
220 		const struct rpc_task *task,
221 		struct rpcrdma_mr *mr,
222 		int nsegs
223 	),
224 
225 	TP_ARGS(task, mr, nsegs),
226 
227 	TP_STRUCT__entry(
228 		__field(unsigned int, task_id)
229 		__field(unsigned int, client_id)
230 		__field(int, nents)
231 		__field(u32, handle)
232 		__field(u32, length)
233 		__field(u64, offset)
234 		__field(int, nsegs)
235 	),
236 
237 	TP_fast_assign(
238 		__entry->task_id = task->tk_pid;
239 		__entry->client_id = task->tk_client->cl_clid;
240 		__entry->nents = mr->mr_nents;
241 		__entry->handle = mr->mr_handle;
242 		__entry->length = mr->mr_length;
243 		__entry->offset = mr->mr_offset;
244 		__entry->nsegs = nsegs;
245 	),
246 
247 	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
248 		__entry->task_id, __entry->client_id,
249 		__entry->length, (unsigned long long)__entry->offset,
250 		__entry->handle,
251 		__entry->nents < __entry->nsegs ? "more" : "last"
252 	)
253 );
254 
/* Define event xprtrdma_chunk_<name> based on xprtrdma_wrch_event. */
255 #define DEFINE_WRCH_EVENT(name)						\
256 		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
257 				TP_PROTO(				\
258 					const struct rpc_task *task,	\
259 					struct rpcrdma_mr *mr,		\
260 					int nsegs			\
261 				),					\
262 				TP_ARGS(task, mr, nsegs))
263 
/*
 * FRWR completion event class: records the MR's resource ID (from
 * fr_mr->res.id) and the WC status; vendor_err is captured only when
 * the completion failed.
 */
264 DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
265 	TP_PROTO(
266 		const struct ib_wc *wc,
267 		const struct rpcrdma_frwr *frwr
268 	),
269 
270 	TP_ARGS(wc, frwr),
271 
272 	TP_STRUCT__entry(
273 		__field(u32, mr_id)
274 		__field(unsigned int, status)
275 		__field(unsigned int, vendor_err)
276 	),
277 
278 	TP_fast_assign(
279 		__entry->mr_id = frwr->fr_mr->res.id;
280 		__entry->status = wc->status;
281 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
282 	),
283 
284 	TP_printk(
285 		"mr.id=%u: %s (%u/0x%x)",
286 		__entry->mr_id, rdma_show_wc_status(__entry->status),
287 		__entry->status, __entry->vendor_err
288 	)
289 );
290 
/* Emit a DEFINE_EVENT named @name based on xprtrdma_frwr_done. */
291 #define DEFINE_FRWR_DONE_EVENT(name)					\
292 		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
293 				TP_PROTO(				\
294 					const struct ib_wc *wc,		\
295 					const struct rpcrdma_frwr *frwr	\
296 				),					\
297 				TP_ARGS(wc, frwr))
298 
/*
 * Export the dma_data_direction enum values so the symbolic names
 * below can be resolved when the trace buffer is parsed.
 */
299 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
300 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
301 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
302 TRACE_DEFINE_ENUM(DMA_NONE);
303 
/* Pretty-print a dma_data_direction value. */
304 #define xprtrdma_show_direction(x)					\
305 		__print_symbolic(x,					\
306 				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
307 				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
308 				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
309 				{ DMA_NONE, "NONE" })
310 
/*
 * MR event class: snapshots an MR's resource ID, scatterlist entry
 * count, RDMA handle/length/offset, and DMA mapping direction.
 */
311 DECLARE_EVENT_CLASS(xprtrdma_mr,
312 	TP_PROTO(
313 		const struct rpcrdma_mr *mr
314 	),
315 
316 	TP_ARGS(mr),
317 
318 	TP_STRUCT__entry(
319 		__field(u32, mr_id)
320 		__field(int, nents)
321 		__field(u32, handle)
322 		__field(u32, length)
323 		__field(u64, offset)
324 		__field(u32, dir)
325 	),
326 
327 	TP_fast_assign(
328 		__entry->mr_id  = mr->frwr.fr_mr->res.id;
329 		__entry->nents  = mr->mr_nents;
330 		__entry->handle = mr->mr_handle;
331 		__entry->length = mr->mr_length;
332 		__entry->offset = mr->mr_offset;
333 		__entry->dir    = mr->mr_dir;
334 	),
335 
336 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
337 		__entry->mr_id, __entry->nents, __entry->length,
338 		(unsigned long long)__entry->offset, __entry->handle,
339 		xprtrdma_show_direction(__entry->dir)
340 	)
341 );
342 
/* Define event xprtrdma_mr_<name> based on xprtrdma_mr. */
343 #define DEFINE_MR_EVENT(name) \
344 		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
345 				TP_PROTO( \
346 					const struct rpcrdma_mr *mr \
347 				), \
348 				TP_ARGS(mr))
349 
/*
 * Backchannel event class: records the rqst, its rpcrdma_req
 * (via rpcr_to_rdmar), that req's cached reply, and the XID in host
 * byte order.
 */
350 DECLARE_EVENT_CLASS(xprtrdma_cb_event,
351 	TP_PROTO(
352 		const struct rpc_rqst *rqst
353 	),
354 
355 	TP_ARGS(rqst),
356 
357 	TP_STRUCT__entry(
358 		__field(const void *, rqst)
359 		__field(const void *, rep)
360 		__field(const void *, req)
361 		__field(u32, xid)
362 	),
363 
364 	TP_fast_assign(
365 		__entry->rqst = rqst;
366 		__entry->req = rpcr_to_rdmar(rqst);
367 		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
368 		__entry->xid = be32_to_cpu(rqst->rq_xid);
369 	),
370 
371 	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
372 		__entry->xid, __entry->rqst, __entry->req, __entry->rep
373 	)
374 );
375 
/* Emit a DEFINE_EVENT named @name based on xprtrdma_cb_event. */
376 #define DEFINE_CB_EVENT(name)						\
377 		DEFINE_EVENT(xprtrdma_cb_event, name,			\
378 				TP_PROTO(				\
379 					const struct rpc_rqst *rqst	\
380 				),					\
381 				TP_ARGS(rqst))
382 
383 /**
384  ** Connection events
385  **/
386 
387 TRACE_EVENT(xprtrdma_inline_thresh,
388 	TP_PROTO(
389 		const struct rpcrdma_ep *ep
390 	),
391 
392 	TP_ARGS(ep),
393 
394 	TP_STRUCT__entry(
395 		__field(unsigned int, inline_send)
396 		__field(unsigned int, inline_recv)
397 		__field(unsigned int, max_send)
398 		__field(unsigned int, max_recv)
399 		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
400 		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
401 	),
402 
403 	TP_fast_assign(
404 		const struct rdma_cm_id *id = ep->re_id;
405 
406 		__entry->inline_send = ep->re_inline_send;
407 		__entry->inline_recv = ep->re_inline_recv;
408 		__entry->max_send = ep->re_max_inline_send;
409 		__entry->max_recv = ep->re_max_inline_recv;
410 		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
411 		       sizeof(struct sockaddr_in6));
412 		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
413 		       sizeof(struct sockaddr_in6));
414 	),
415 
416 	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
417 		__entry->srcaddr, __entry->dstaddr,
418 		__entry->inline_send, __entry->inline_recv,
419 		__entry->max_send, __entry->max_recv
420 	)
421 );
422 
423 DEFINE_CONN_EVENT(connect);
424 DEFINE_CONN_EVENT(disconnect);
425 
426 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
427 DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
428 
429 TRACE_EVENT(xprtrdma_op_connect,
430 	TP_PROTO(
431 		const struct rpcrdma_xprt *r_xprt,
432 		unsigned long delay
433 	),
434 
435 	TP_ARGS(r_xprt, delay),
436 
437 	TP_STRUCT__entry(
438 		__field(const void *, r_xprt)
439 		__field(unsigned long, delay)
440 		__string(addr, rpcrdma_addrstr(r_xprt))
441 		__string(port, rpcrdma_portstr(r_xprt))
442 	),
443 
444 	TP_fast_assign(
445 		__entry->r_xprt = r_xprt;
446 		__entry->delay = delay;
447 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
448 		__assign_str(port, rpcrdma_portstr(r_xprt));
449 	),
450 
451 	TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
452 		__get_str(addr), __get_str(port), __entry->r_xprt,
453 		__entry->delay
454 	)
455 );
456 
457 
458 TRACE_EVENT(xprtrdma_op_set_cto,
459 	TP_PROTO(
460 		const struct rpcrdma_xprt *r_xprt,
461 		unsigned long connect,
462 		unsigned long reconnect
463 	),
464 
465 	TP_ARGS(r_xprt, connect, reconnect),
466 
467 	TP_STRUCT__entry(
468 		__field(const void *, r_xprt)
469 		__field(unsigned long, connect)
470 		__field(unsigned long, reconnect)
471 		__string(addr, rpcrdma_addrstr(r_xprt))
472 		__string(port, rpcrdma_portstr(r_xprt))
473 	),
474 
475 	TP_fast_assign(
476 		__entry->r_xprt = r_xprt;
477 		__entry->connect = connect;
478 		__entry->reconnect = reconnect;
479 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
480 		__assign_str(port, rpcrdma_portstr(r_xprt));
481 	),
482 
483 	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
484 		__get_str(addr), __get_str(port), __entry->r_xprt,
485 		__entry->connect / HZ, __entry->reconnect / HZ
486 	)
487 );
488 
489 TRACE_EVENT(xprtrdma_qp_event,
490 	TP_PROTO(
491 		const struct rpcrdma_ep *ep,
492 		const struct ib_event *event
493 	),
494 
495 	TP_ARGS(ep, event),
496 
497 	TP_STRUCT__entry(
498 		__field(unsigned long, event)
499 		__string(name, event->device->name)
500 		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
501 		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
502 	),
503 
504 	TP_fast_assign(
505 		const struct rdma_cm_id *id = ep->re_id;
506 
507 		__entry->event = event->event;
508 		__assign_str(name, event->device->name);
509 		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
510 		       sizeof(struct sockaddr_in6));
511 		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
512 		       sizeof(struct sockaddr_in6));
513 	),
514 
515 	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
516 		__entry->srcaddr, __entry->dstaddr, __get_str(name),
517 		rdma_show_ib_event(__entry->event), __entry->event
518 	)
519 );
520 
521 /**
522  ** Call events
523  **/
524 
525 TRACE_EVENT(xprtrdma_createmrs,
526 	TP_PROTO(
527 		const struct rpcrdma_xprt *r_xprt,
528 		unsigned int count
529 	),
530 
531 	TP_ARGS(r_xprt, count),
532 
533 	TP_STRUCT__entry(
534 		__field(const void *, r_xprt)
535 		__string(addr, rpcrdma_addrstr(r_xprt))
536 		__string(port, rpcrdma_portstr(r_xprt))
537 		__field(unsigned int, count)
538 	),
539 
540 	TP_fast_assign(
541 		__entry->r_xprt = r_xprt;
542 		__entry->count = count;
543 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
544 		__assign_str(port, rpcrdma_portstr(r_xprt));
545 	),
546 
547 	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
548 		__get_str(addr), __get_str(port), __entry->r_xprt,
549 		__entry->count
550 	)
551 );
552 
553 TRACE_EVENT(xprtrdma_mr_get,
554 	TP_PROTO(
555 		const struct rpcrdma_req *req
556 	),
557 
558 	TP_ARGS(req),
559 
560 	TP_STRUCT__entry(
561 		__field(const void *, req)
562 		__field(unsigned int, task_id)
563 		__field(unsigned int, client_id)
564 		__field(u32, xid)
565 	),
566 
567 	TP_fast_assign(
568 		const struct rpc_rqst *rqst = &req->rl_slot;
569 
570 		__entry->req = req;
571 		__entry->task_id = rqst->rq_task->tk_pid;
572 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
573 		__entry->xid = be32_to_cpu(rqst->rq_xid);
574 	),
575 
576 	TP_printk("task:%u@%u xid=0x%08x req=%p",
577 		__entry->task_id, __entry->client_id, __entry->xid,
578 		__entry->req
579 	)
580 );
581 
582 TRACE_EVENT(xprtrdma_nomrs,
583 	TP_PROTO(
584 		const struct rpcrdma_req *req
585 	),
586 
587 	TP_ARGS(req),
588 
589 	TP_STRUCT__entry(
590 		__field(const void *, req)
591 		__field(unsigned int, task_id)
592 		__field(unsigned int, client_id)
593 		__field(u32, xid)
594 	),
595 
596 	TP_fast_assign(
597 		const struct rpc_rqst *rqst = &req->rl_slot;
598 
599 		__entry->req = req;
600 		__entry->task_id = rqst->rq_task->tk_pid;
601 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
602 		__entry->xid = be32_to_cpu(rqst->rq_xid);
603 	),
604 
605 	TP_printk("task:%u@%u xid=0x%08x req=%p",
606 		__entry->task_id, __entry->client_id, __entry->xid,
607 		__entry->req
608 	)
609 );
610 
611 DEFINE_RDCH_EVENT(read);
612 DEFINE_WRCH_EVENT(write);
613 DEFINE_WRCH_EVENT(reply);
614 
615 TRACE_DEFINE_ENUM(rpcrdma_noch);
616 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
617 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
618 TRACE_DEFINE_ENUM(rpcrdma_readch);
619 TRACE_DEFINE_ENUM(rpcrdma_areadch);
620 TRACE_DEFINE_ENUM(rpcrdma_writech);
621 TRACE_DEFINE_ENUM(rpcrdma_replych);
622 
623 #define xprtrdma_show_chunktype(x)					\
624 		__print_symbolic(x,					\
625 				{ rpcrdma_noch, "inline" },		\
626 				{ rpcrdma_noch_pullup, "pullup" },	\
627 				{ rpcrdma_noch_mapped, "mapped" },	\
628 				{ rpcrdma_readch, "read list" },	\
629 				{ rpcrdma_areadch, "*read list" },	\
630 				{ rpcrdma_writech, "write list" },	\
631 				{ rpcrdma_replych, "reply chunk" })
632 
633 TRACE_EVENT(xprtrdma_marshal,
634 	TP_PROTO(
635 		const struct rpcrdma_req *req,
636 		unsigned int rtype,
637 		unsigned int wtype
638 	),
639 
640 	TP_ARGS(req, rtype, wtype),
641 
642 	TP_STRUCT__entry(
643 		__field(unsigned int, task_id)
644 		__field(unsigned int, client_id)
645 		__field(u32, xid)
646 		__field(unsigned int, hdrlen)
647 		__field(unsigned int, headlen)
648 		__field(unsigned int, pagelen)
649 		__field(unsigned int, taillen)
650 		__field(unsigned int, rtype)
651 		__field(unsigned int, wtype)
652 	),
653 
654 	TP_fast_assign(
655 		const struct rpc_rqst *rqst = &req->rl_slot;
656 
657 		__entry->task_id = rqst->rq_task->tk_pid;
658 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
659 		__entry->xid = be32_to_cpu(rqst->rq_xid);
660 		__entry->hdrlen = req->rl_hdrbuf.len;
661 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
662 		__entry->pagelen = rqst->rq_snd_buf.page_len;
663 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
664 		__entry->rtype = rtype;
665 		__entry->wtype = wtype;
666 	),
667 
668 	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
669 		__entry->task_id, __entry->client_id, __entry->xid,
670 		__entry->hdrlen,
671 		__entry->headlen, __entry->pagelen, __entry->taillen,
672 		xprtrdma_show_chunktype(__entry->rtype),
673 		xprtrdma_show_chunktype(__entry->wtype)
674 	)
675 );
676 
677 TRACE_EVENT(xprtrdma_marshal_failed,
678 	TP_PROTO(const struct rpc_rqst *rqst,
679 		 int ret
680 	),
681 
682 	TP_ARGS(rqst, ret),
683 
684 	TP_STRUCT__entry(
685 		__field(unsigned int, task_id)
686 		__field(unsigned int, client_id)
687 		__field(u32, xid)
688 		__field(int, ret)
689 	),
690 
691 	TP_fast_assign(
692 		__entry->task_id = rqst->rq_task->tk_pid;
693 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
694 		__entry->xid = be32_to_cpu(rqst->rq_xid);
695 		__entry->ret = ret;
696 	),
697 
698 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
699 		__entry->task_id, __entry->client_id, __entry->xid,
700 		__entry->ret
701 	)
702 );
703 
704 TRACE_EVENT(xprtrdma_prepsend_failed,
705 	TP_PROTO(const struct rpc_rqst *rqst,
706 		 int ret
707 	),
708 
709 	TP_ARGS(rqst, ret),
710 
711 	TP_STRUCT__entry(
712 		__field(unsigned int, task_id)
713 		__field(unsigned int, client_id)
714 		__field(u32, xid)
715 		__field(int, ret)
716 	),
717 
718 	TP_fast_assign(
719 		__entry->task_id = rqst->rq_task->tk_pid;
720 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
721 		__entry->xid = be32_to_cpu(rqst->rq_xid);
722 		__entry->ret = ret;
723 	),
724 
725 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
726 		__entry->task_id, __entry->client_id, __entry->xid,
727 		__entry->ret
728 	)
729 );
730 
/*
 * Posting a Send WR: records the req, its send context, the number of
 * SGEs in the WR, and whether the Send requests a signaled completion.
 */
731 TRACE_EVENT(xprtrdma_post_send,
732 	TP_PROTO(
733 		const struct rpcrdma_req *req
734 	),
735 
736 	TP_ARGS(req),
737 
738 	TP_STRUCT__entry(
739 		__field(const void *, req)
740 		__field(const void *, sc)
741 		__field(unsigned int, task_id)
742 		__field(unsigned int, client_id)
743 		__field(int, num_sge)
744 		__field(int, signaled)
745 	),
746 
747 	TP_fast_assign(
748 		const struct rpc_rqst *rqst = &req->rl_slot;
749 
750 		__entry->task_id = rqst->rq_task->tk_pid;
		/* tk_client can be NULL here; report -1 in that case */
751 		__entry->client_id = rqst->rq_task->tk_client ?
752 				     rqst->rq_task->tk_client->cl_clid : -1;
753 		__entry->req = req;
754 		__entry->sc = req->rl_sendctx;
755 		__entry->num_sge = req->rl_wr.num_sge;
756 		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
757 	),
758 
759 	TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s",
760 		__entry->task_id, __entry->client_id,
761 		__entry->req, __entry->sc, __entry->num_sge,
762 		(__entry->num_sge == 1 ? "" : "s"),
763 		(__entry->signaled ? "signaled" : "")
764 	)
765 );
766 
767 TRACE_EVENT(xprtrdma_post_recv,
768 	TP_PROTO(
769 		const struct rpcrdma_rep *rep
770 	),
771 
772 	TP_ARGS(rep),
773 
774 	TP_STRUCT__entry(
775 		__field(const void *, rep)
776 	),
777 
778 	TP_fast_assign(
779 		__entry->rep = rep;
780 	),
781 
782 	TP_printk("rep=%p",
783 		__entry->rep
784 	)
785 );
786 
787 TRACE_EVENT(xprtrdma_post_recvs,
788 	TP_PROTO(
789 		const struct rpcrdma_xprt *r_xprt,
790 		unsigned int count,
791 		int status
792 	),
793 
794 	TP_ARGS(r_xprt, count, status),
795 
796 	TP_STRUCT__entry(
797 		__field(const void *, r_xprt)
798 		__field(unsigned int, count)
799 		__field(int, status)
800 		__field(int, posted)
801 		__string(addr, rpcrdma_addrstr(r_xprt))
802 		__string(port, rpcrdma_portstr(r_xprt))
803 	),
804 
805 	TP_fast_assign(
806 		__entry->r_xprt = r_xprt;
807 		__entry->count = count;
808 		__entry->status = status;
809 		__entry->posted = r_xprt->rx_ep->re_receive_count;
810 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
811 		__assign_str(port, rpcrdma_portstr(r_xprt));
812 	),
813 
814 	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
815 		__get_str(addr), __get_str(port), __entry->r_xprt,
816 		__entry->count, __entry->posted, __entry->status
817 	)
818 );
819 
820 TRACE_EVENT(xprtrdma_post_linv,
821 	TP_PROTO(
822 		const struct rpcrdma_req *req,
823 		int status
824 	),
825 
826 	TP_ARGS(req, status),
827 
828 	TP_STRUCT__entry(
829 		__field(const void *, req)
830 		__field(int, status)
831 		__field(u32, xid)
832 	),
833 
834 	TP_fast_assign(
835 		__entry->req = req;
836 		__entry->status = status;
837 		__entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
838 	),
839 
840 	TP_printk("req=%p xid=0x%08x status=%d",
841 		__entry->req, __entry->xid, __entry->status
842 	)
843 );
844 
845 /**
846  ** Completion events
847  **/
848 
849 TRACE_EVENT(xprtrdma_wc_send,
850 	TP_PROTO(
851 		const struct rpcrdma_sendctx *sc,
852 		const struct ib_wc *wc
853 	),
854 
855 	TP_ARGS(sc, wc),
856 
857 	TP_STRUCT__entry(
858 		__field(const void *, req)
859 		__field(const void *, sc)
860 		__field(unsigned int, unmap_count)
861 		__field(unsigned int, status)
862 		__field(unsigned int, vendor_err)
863 	),
864 
865 	TP_fast_assign(
866 		__entry->req = sc->sc_req;
867 		__entry->sc = sc;
868 		__entry->unmap_count = sc->sc_unmap_count;
869 		__entry->status = wc->status;
870 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
871 	),
872 
873 	TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)",
874 		__entry->req, __entry->sc, __entry->unmap_count,
875 		rdma_show_wc_status(__entry->status),
876 		__entry->status, __entry->vendor_err
877 	)
878 );
879 
/*
 * Receive completion: the rep is recovered from the WC's wr_cqe via
 * container_of(). byte_len is meaningful only on success and
 * vendor_err only on failure — the unused field is zeroed.
 */
880 TRACE_EVENT(xprtrdma_wc_receive,
881 	TP_PROTO(
882 		const struct ib_wc *wc
883 	),
884 
885 	TP_ARGS(wc),
886 
887 	TP_STRUCT__entry(
888 		__field(const void *, rep)
889 		__field(u32, byte_len)
890 		__field(unsigned int, status)
891 		__field(u32, vendor_err)
892 	),
893 
894 	TP_fast_assign(
895 		__entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
896 					    rr_cqe);
897 		__entry->status = wc->status;
898 		if (wc->status) {
899 			__entry->byte_len = 0;
900 			__entry->vendor_err = wc->vendor_err;
901 		} else {
902 			__entry->byte_len = wc->byte_len;
903 			__entry->vendor_err = 0;
904 		}
905 	),
906 
907 	TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
908 		__entry->rep, __entry->byte_len,
909 		rdma_show_wc_status(__entry->status),
910 		__entry->status, __entry->vendor_err
911 	)
912 );
913 
914 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
915 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
916 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
917 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
918 
919 TRACE_EVENT(xprtrdma_frwr_alloc,
920 	TP_PROTO(
921 		const struct rpcrdma_mr *mr,
922 		int rc
923 	),
924 
925 	TP_ARGS(mr, rc),
926 
927 	TP_STRUCT__entry(
928 		__field(u32, mr_id)
929 		__field(int, rc)
930 	),
931 
932 	TP_fast_assign(
933 		__entry->mr_id = mr->frwr.fr_mr->res.id;
934 		__entry->rc = rc;
935 	),
936 
937 	TP_printk("mr.id=%u: rc=%d",
938 		__entry->mr_id, __entry->rc
939 	)
940 );
941 
942 TRACE_EVENT(xprtrdma_frwr_dereg,
943 	TP_PROTO(
944 		const struct rpcrdma_mr *mr,
945 		int rc
946 	),
947 
948 	TP_ARGS(mr, rc),
949 
950 	TP_STRUCT__entry(
951 		__field(u32, mr_id)
952 		__field(int, nents)
953 		__field(u32, handle)
954 		__field(u32, length)
955 		__field(u64, offset)
956 		__field(u32, dir)
957 		__field(int, rc)
958 	),
959 
960 	TP_fast_assign(
961 		__entry->mr_id  = mr->frwr.fr_mr->res.id;
962 		__entry->nents  = mr->mr_nents;
963 		__entry->handle = mr->mr_handle;
964 		__entry->length = mr->mr_length;
965 		__entry->offset = mr->mr_offset;
966 		__entry->dir    = mr->mr_dir;
967 		__entry->rc	= rc;
968 	),
969 
970 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
971 		__entry->mr_id, __entry->nents, __entry->length,
972 		(unsigned long long)__entry->offset, __entry->handle,
973 		xprtrdma_show_direction(__entry->dir),
974 		__entry->rc
975 	)
976 );
977 
978 TRACE_EVENT(xprtrdma_frwr_sgerr,
979 	TP_PROTO(
980 		const struct rpcrdma_mr *mr,
981 		int sg_nents
982 	),
983 
984 	TP_ARGS(mr, sg_nents),
985 
986 	TP_STRUCT__entry(
987 		__field(u32, mr_id)
988 		__field(u64, addr)
989 		__field(u32, dir)
990 		__field(int, nents)
991 	),
992 
993 	TP_fast_assign(
994 		__entry->mr_id = mr->frwr.fr_mr->res.id;
995 		__entry->addr = mr->mr_sg->dma_address;
996 		__entry->dir = mr->mr_dir;
997 		__entry->nents = sg_nents;
998 	),
999 
1000 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
1001 		__entry->mr_id, __entry->addr,
1002 		xprtrdma_show_direction(__entry->dir),
1003 		__entry->nents
1004 	)
1005 );
1006 
1007 TRACE_EVENT(xprtrdma_frwr_maperr,
1008 	TP_PROTO(
1009 		const struct rpcrdma_mr *mr,
1010 		int num_mapped
1011 	),
1012 
1013 	TP_ARGS(mr, num_mapped),
1014 
1015 	TP_STRUCT__entry(
1016 		__field(u32, mr_id)
1017 		__field(u64, addr)
1018 		__field(u32, dir)
1019 		__field(int, num_mapped)
1020 		__field(int, nents)
1021 	),
1022 
1023 	TP_fast_assign(
1024 		__entry->mr_id = mr->frwr.fr_mr->res.id;
1025 		__entry->addr = mr->mr_sg->dma_address;
1026 		__entry->dir = mr->mr_dir;
1027 		__entry->num_mapped = num_mapped;
1028 		__entry->nents = mr->mr_nents;
1029 	),
1030 
1031 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
1032 		__entry->mr_id, __entry->addr,
1033 		xprtrdma_show_direction(__entry->dir),
1034 		__entry->num_mapped, __entry->nents
1035 	)
1036 );
1037 
1038 DEFINE_MR_EVENT(localinv);
1039 DEFINE_MR_EVENT(map);
1040 DEFINE_MR_EVENT(unmap);
1041 DEFINE_MR_EVENT(reminv);
1042 DEFINE_MR_EVENT(recycle);
1043 
/*
 * Reports a DMA mapping failure for the given device address.
 */
1044 TRACE_EVENT(xprtrdma_dma_maperr,
1045 	TP_PROTO(
1046 		u64 addr
1047 	),
1048 
1049 	TP_ARGS(addr),
1050 
1051 	TP_STRUCT__entry(
1052 		__field(u64, addr)
1053 	),
1054 
1055 	TP_fast_assign(
1056 		__entry->addr = addr;
1057 	),
1058 
	/*
	 * No trailing "\n": the tracing framework terminates each record
	 * itself, and a literal newline produces a spurious blank line in
	 * the trace output. Every other TP_printk in this file omits it.
	 */
1059 	TP_printk("dma addr=0x%llx", __entry->addr)
1060 );
1061 
1062 /**
1063  ** Reply events
1064  **/
1065 
1066 TRACE_EVENT(xprtrdma_reply,
1067 	TP_PROTO(
1068 		const struct rpc_task *task,
1069 		const struct rpcrdma_rep *rep,
1070 		const struct rpcrdma_req *req,
1071 		unsigned int credits
1072 	),
1073 
1074 	TP_ARGS(task, rep, req, credits),
1075 
1076 	TP_STRUCT__entry(
1077 		__field(unsigned int, task_id)
1078 		__field(unsigned int, client_id)
1079 		__field(const void *, rep)
1080 		__field(const void *, req)
1081 		__field(u32, xid)
1082 		__field(unsigned int, credits)
1083 	),
1084 
1085 	TP_fast_assign(
1086 		__entry->task_id = task->tk_pid;
1087 		__entry->client_id = task->tk_client->cl_clid;
1088 		__entry->rep = rep;
1089 		__entry->req = req;
1090 		__entry->xid = be32_to_cpu(rep->rr_xid);
1091 		__entry->credits = credits;
1092 	),
1093 
1094 	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
1095 		__entry->task_id, __entry->client_id, __entry->xid,
1096 		__entry->credits, __entry->rep, __entry->req
1097 	)
1098 );
1099 
1100 TRACE_EVENT(xprtrdma_defer_cmp,
1101 	TP_PROTO(
1102 		const struct rpcrdma_rep *rep
1103 	),
1104 
1105 	TP_ARGS(rep),
1106 
1107 	TP_STRUCT__entry(
1108 		__field(unsigned int, task_id)
1109 		__field(unsigned int, client_id)
1110 		__field(const void *, rep)
1111 		__field(u32, xid)
1112 	),
1113 
1114 	TP_fast_assign(
1115 		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
1116 		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
1117 		__entry->rep = rep;
1118 		__entry->xid = be32_to_cpu(rep->rr_xid);
1119 	),
1120 
1121 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1122 		__entry->task_id, __entry->client_id, __entry->xid,
1123 		__entry->rep
1124 	)
1125 );
1126 
1127 DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
1128 DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
1129 DEFINE_REPLY_EVENT(xprtrdma_reply_short);
1130 DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
1131 
1132 TRACE_EVENT(xprtrdma_fixup,
1133 	TP_PROTO(
1134 		const struct rpc_rqst *rqst,
1135 		unsigned long fixup
1136 	),
1137 
1138 	TP_ARGS(rqst, fixup),
1139 
1140 	TP_STRUCT__entry(
1141 		__field(unsigned int, task_id)
1142 		__field(unsigned int, client_id)
1143 		__field(unsigned long, fixup)
1144 		__field(size_t, headlen)
1145 		__field(unsigned int, pagelen)
1146 		__field(size_t, taillen)
1147 	),
1148 
1149 	TP_fast_assign(
1150 		__entry->task_id = rqst->rq_task->tk_pid;
1151 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1152 		__entry->fixup = fixup;
1153 		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
1154 		__entry->pagelen = rqst->rq_rcv_buf.page_len;
1155 		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
1156 	),
1157 
1158 	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
1159 		__entry->task_id, __entry->client_id, __entry->fixup,
1160 		__entry->headlen, __entry->pagelen, __entry->taillen
1161 	)
1162 );
1163 
1164 TRACE_EVENT(xprtrdma_decode_seg,
1165 	TP_PROTO(
1166 		u32 handle,
1167 		u32 length,
1168 		u64 offset
1169 	),
1170 
1171 	TP_ARGS(handle, length, offset),
1172 
1173 	TP_STRUCT__entry(
1174 		__field(u32, handle)
1175 		__field(u32, length)
1176 		__field(u64, offset)
1177 	),
1178 
1179 	TP_fast_assign(
1180 		__entry->handle = handle;
1181 		__entry->length = length;
1182 		__entry->offset = offset;
1183 	),
1184 
1185 	TP_printk("%u@0x%016llx:0x%08x",
1186 		__entry->length, (unsigned long long)__entry->offset,
1187 		__entry->handle
1188 	)
1189 );
1190 
1191 /**
1192  ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
1193  **/
1194 
/* Records the rpcrdma_req chosen for @task along with the call and
 * receive buffer sizes requested by the RPC.
 */
TRACE_EVENT(xprtrdma_op_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(size_t, callsize)
		__field(size_t, rcvsize)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->callsize = task->tk_rqstp->rq_callsize;
		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
	),

	TP_printk("task:%u@%u req=%p (%zu, %zu)",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->callsize, __entry->rcvsize
	)
);
1224 
/* Records the release of @req, including the reply buffer (rl_reply)
 * still attached to it at that point.
 */
TRACE_EVENT(xprtrdma_op_free,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->rep = req->rl_reply;
	),

	TP_printk("task:%u@%u req=%p rep=%p",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->rep
	)
);
1252 
1253 /**
1254  ** Callback events
1255  **/
1256 
/* Records backchannel setup on @r_xprt: how many callback requests
 * were provisioned, and the peer's address/port.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);
1284 
/* Backchannel call/reply events (event class defined earlier in this file) */
DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);
1287 
/* Records a reply buffer (@rep) found still attached to @rqst when it
 * should no longer be -- a "leaked" rep.
 */
TRACE_EVENT(xprtrdma_leaked_rep,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rqst, rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->rep = rep;
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);
1315 
1316 /**
1317  ** Server-side RPC/RDMA events
1318  **/
1319 
/* Event class for server-side connection-accept failures: records the
 * peer address and a status code from one stage of accept processing.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);
1342 
/* Instantiate an svcrdma_<name>_err event from svcrdma_accept_class */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

/* One event per accept stage that can fail */
DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
1356 
/* Export the RPC/RDMA procedure values so user-space trace tools can
 * resolve the symbolic names used by show_rpcrdma_proc() below.
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Map an RPC/RDMA procedure number to its symbolic name */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1370 
/* Records a successfully parsed RPC/RDMA transport header: @p points
 * at the four fixed header words (xid, vers, credits, proc), and
 * @hdrlen is the total decoded header length in bytes.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* On-the-wire word order: xid, vers, credits, proc */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1405 
/* Records a received message too short to contain a complete
 * RPC/RDMA transport header (@hdrlen bytes were available).
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
1430 
/* Event class for transport-header decode failures: captures the four
 * fixed header words (xid, vers, credits, proc) from @p for diagnosis.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* On-the-wire word order: xid, vers, credits, proc */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
1461 
/* Instantiate an svcrdma_decode_<name>_err event from svcrdma_badreq_event */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

/* One event per class of unparseable or rejected request */
DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1475 
/* Event class for one RDMA segment: RKEY (handle), byte length, and
 * remote offset. Same "length@offset:handle" format as the client side.
 */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1502 
/* Instantiate an svcrdma_<name> event from svcrdma_segment_event */
#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

/* Segment events for decode/encode/send of read and write chunks */
DEFINE_SEGMENT_EVENT(decode_wseg);
DEFINE_SEGMENT_EVENT(encode_rseg);
DEFINE_SEGMENT_EVENT(send_rseg);
DEFINE_SEGMENT_EVENT(encode_wseg);
DEFINE_SEGMENT_EVENT(send_wseg);
1517 
/* Event class recording only a chunk's total payload length in bytes */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);
1537 
/* Instantiate an svcrdma_<name> event from svcrdma_chunk_event */
#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name,	\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

/* Chunk-level events for encode/send of each chunk type */
DEFINE_CHUNK_EVENT(send_pzr);
DEFINE_CHUNK_EVENT(encode_write_chunk);
DEFINE_CHUNK_EVENT(send_write_chunk);
DEFINE_CHUNK_EVENT(encode_read_chunk);
DEFINE_CHUNK_EVENT(send_reply_chunk);
1550 
/* Records a read chunk being pulled: its total length and its XDR
 * position within the call message.
 */
TRACE_EVENT(svcrdma_send_read_chunk,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);
1573 
/* Event class for sending an RPC/RDMA error reply, keyed by the XID
 * of the offending request.
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1593 
/* Instantiate an svcrdma_err_<name> event from svcrdma_error_event */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

/* ERR_VERS and ERR_CHUNK error replies */
DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1603 
1604 /**
1605  ** Server-side RDMA API events
1606  **/
1607 
/* Event class for DMA map/unmap of one region: records the DMA
 * address and length, plus the device and peer for context.
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);
1636 
/* Instantiate an svcrdma_<name> event from svcrdma_dma_map_class */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

/* Page-granularity DMA map and unmap events */
DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1648 
/* Records a failure to DMA-map an @nents-element scatterlist for an
 * RDMA Read/Write; @status is the error returned by the mapping call.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);
1677 
1678 TRACE_EVENT(svcrdma_no_rwctx_err,
1679 	TP_PROTO(
1680 		const struct svcxprt_rdma *rdma,
1681 		unsigned int num_sges
1682 	),
1683 
1684 	TP_ARGS(rdma, num_sges),
1685 
1686 	TP_STRUCT__entry(
1687 		__field(unsigned int, num_sges)
1688 		__string(device, rdma->sc_cm_id->device->name)
1689 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1690 	),
1691 
1692 	TP_fast_assign(
1693 		__entry->num_sges = num_sges;
1694 		__assign_str(device, rdma->sc_cm_id->device->name);
1695 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1696 	),
1697 
1698 	TP_printk("addr=%s device=%s num_sges=%d",
1699 		__get_str(addr), __get_str(device), __entry->num_sges
1700 	)
1701 );
1702 
/* Records a request whose payload ran past the pages available in
 * @rqst; @pageno is the index where the overrun occurred.
 */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		/* NOTE(review): sibling events use be32_to_cpu(); the
		 * double-underscore variant here behaves the same.
		 */
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);
1730 
/* Records a Write chunk too small for the payload: @remaining bytes
 * were left after consuming segment @seg_no of @num_segs.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
1762 
/* Records that @len bytes of a Send payload were copied (pulled up)
 * into a single buffer.
 */
TRACE_EVENT(svcrdma_send_pullup,
	TP_PROTO(
		unsigned int len
	),

	TP_ARGS(len),

	TP_STRUCT__entry(
		__field(unsigned int, len)
	),

	TP_fast_assign(
		__entry->len = len;
	),

	TP_printk("len=%u", __entry->len)
);
1780 
/* Records a failure to send the reply for @rqst; @status is the
 * error code from the send path.
 */
TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);
1805 
1806 TRACE_EVENT(svcrdma_post_send,
1807 	TP_PROTO(
1808 		const struct svc_rdma_send_ctxt *ctxt
1809 	),
1810 
1811 	TP_ARGS(ctxt),
1812 
1813 	TP_STRUCT__entry(
1814 		__field(u32, cq_id)
1815 		__field(int, completion_id)
1816 		__field(unsigned int, num_sge)
1817 		__field(u32, inv_rkey)
1818 	),
1819 
1820 	TP_fast_assign(
1821 		const struct ib_send_wr *wr = &ctxt->sc_send_wr;
1822 
1823 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1824 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1825 		__entry->num_sge = wr->num_sge;
1826 		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
1827 					wr->ex.invalidate_rkey : 0;
1828 	),
1829 
1830 	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
1831 		__entry->cq_id, __entry->completion_id,
1832 		__entry->num_sge, __entry->inv_rkey
1833 	)
1834 );
1835 
/* Send completion (class: rpcrdma_completion_class) */
DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1837 
1838 TRACE_EVENT(svcrdma_post_recv,
1839 	TP_PROTO(
1840 		const struct svc_rdma_recv_ctxt *ctxt
1841 	),
1842 
1843 	TP_ARGS(ctxt),
1844 
1845 	TP_STRUCT__entry(
1846 		__field(u32, cq_id)
1847 		__field(int, completion_id)
1848 	),
1849 
1850 	TP_fast_assign(
1851 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1852 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1853 	),
1854 
1855 	TP_printk("cq.id=%d cid=%d",
1856 		__entry->cq_id, __entry->completion_id
1857 	)
1858 );
1859 
/* Receive completion (class: rpcrdma_completion_class) */
DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);
1861 
/* Records a failed attempt to post to the Receive queue; @status is
 * the error returned by the post operation.
 */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
1884 
/* Records posting of a chunk's WR chain; @sqecount is the number of
 * Send Queue entries the chain consumes.
 */
TRACE_EVENT(svcrdma_post_chunk,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);
1910 
/* RDMA Read and Write completions (class: rpcrdma_completion_class) */
DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
1913 
1914 TRACE_EVENT(svcrdma_qp_error,
1915 	TP_PROTO(
1916 		const struct ib_event *event,
1917 		const struct sockaddr *sap
1918 	),
1919 
1920 	TP_ARGS(event, sap),
1921 
1922 	TP_STRUCT__entry(
1923 		__field(unsigned int, event)
1924 		__string(device, event->device->name)
1925 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1926 	),
1927 
1928 	TP_fast_assign(
1929 		__entry->event = event->event;
1930 		__assign_str(device, event->device->name);
1931 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1932 			 "%pISpc", sap);
1933 	),
1934 
1935 	TP_printk("addr=%s dev=%s event=%s (%u)",
1936 		__entry->addr, __get_str(device),
1937 		rdma_show_ib_event(__entry->event), __entry->event
1938 	)
1939 );
1940 
/* Event class sampling Send Queue occupancy: available entries
 * (sc_sq_avail) versus total depth (sc_sq_depth).
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
1964 
/* Instantiate an svcrdma_sq_<name> event from svcrdma_sendqueue_event */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

/* Send Queue exhaustion and retry events */
DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1974 
/* Records a failed Send Queue post, together with the queue's
 * occupancy (avail/depth) at the time of failure.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
2002 
2003 #endif /* _TRACE_RPCRDMA_H */
2004 
2005 #include <trace/define_trace.h>
2006