/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */

/*
 * Copyright (c) 2018 Intel Corporation.  All rights reserved.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM ib_mad

#if !defined(_TRACE_IB_MAD_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IB_MAD_H

#include <linux/tracepoint.h>
#include <rdma/ib_mad.h>

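/*
 * create_mad_addr_info() is only forward declared here.  Its definition is
 * expected to be supplied by the tracepoint user (the MAD core,
 * drivers/infiniband/core/mad.c in mainline), where it fills in the
 * address-related fields of the send trace entry (sl, dlid, rqpn, rqkey)
 * from the work request's address handle.
 */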
#ifdef CONFIG_TRACEPOINTS
struct trace_event_raw_ib_mad_send_template;
static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
			  struct ib_mad_qp_info *qp_info,
			  struct trace_event_raw_ib_mad_send_template *entry);
#endif

DECLARE_EVENT_CLASS(ib_mad_send_template,
	TP_PROTO(struct ib_mad_send_wr_private *wr,
		 struct ib_mad_qp_info *qp_info),
	TP_ARGS(wr, qp_info),

	TP_STRUCT__entry(
		__field(u8,             base_version)
		__field(u8,             mgmt_class)
		__field(u8,             class_version)
		__field(u8,             port_num)
		__field(u32,            qp_num)
		__field(u8,             method)
		__field(u8,             sl)
		__field(u16,            attr_id)
		__field(u32,            attr_mod)
		__field(u64,            wrtid)
		__field(u64,            tid)
		__field(u16,            status)
		__field(u16,            class_specific)
		__field(u32,            length)
		__field(u32,            dlid)
		__field(u32,            rqpn)
		__field(u32,            rqkey)
		__field(u32,            dev_index)
		__field(void *,         agent_priv)
		__field(unsigned long,  timeout)
		__field(int,            retries_left)
		__field(int,            max_retries)
		__field(int,            retry)
	),

	TP_fast_assign(
		__entry->dev_index = wr->mad_agent_priv->agent.device->index;
		__entry->port_num = wr->mad_agent_priv->agent.port_num;
		__entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
		__entry->agent_priv = wr->mad_agent_priv;
		__entry->wrtid = wr->tid;
		__entry->max_retries = wr->max_retries;
		__entry->retries_left = wr->retries_left;
		__entry->retry = wr->retry;
		__entry->timeout = wr->timeout;
		__entry->length = wr->send_buf.hdr_len +
				  wr->send_buf.data_len;
		__entry->base_version =
			((struct ib_mad_hdr *)wr->send_buf.mad)->base_version;
		__entry->mgmt_class =
			((struct ib_mad_hdr *)wr->send_buf.mad)->mgmt_class;
		__entry->class_version =
			((struct ib_mad_hdr *)wr->send_buf.mad)->class_version;
		__entry->method =
			((struct ib_mad_hdr *)wr->send_buf.mad)->method;
		__entry->status =
			((struct ib_mad_hdr *)wr->send_buf.mad)->status;
		__entry->class_specific =
			((struct ib_mad_hdr *)wr->send_buf.mad)->class_specific;
		__entry->tid = ((struct ib_mad_hdr *)wr->send_buf.mad)->tid;
		__entry->attr_id =
			((struct ib_mad_hdr *)wr->send_buf.mad)->attr_id;
		__entry->attr_mod =
			((struct ib_mad_hdr *)wr->send_buf.mad)->attr_mod;
		create_mad_addr_info(wr, qp_info, __entry);
	),

	TP_printk("%d:%d QP%d agent %p: " \
		  "wrtid 0x%llx; %d/%d retries(%d); timeout %lu length %d : " \
		  "hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \
		  "method 0x%x status 0x%x class_specific 0x%x tid 0x%llx " \
		  "attr_id 0x%x attr_mod 0x%x => dlid 0x%08x sl %d " \
		  "rqpn 0x%x rqkey 0x%x",
		__entry->dev_index, __entry->port_num, __entry->qp_num,
		__entry->agent_priv, be64_to_cpu(__entry->wrtid),
		__entry->retries_left, __entry->max_retries,
		__entry->retry, __entry->timeout, __entry->length,
		__entry->base_version, __entry->mgmt_class,
		__entry->class_version,
		__entry->method, be16_to_cpu(__entry->status),
		be16_to_cpu(__entry->class_specific),
		be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
		be32_to_cpu(__entry->attr_mod),
		be32_to_cpu(__entry->dlid), __entry->sl,
		__entry->rqpn, __entry->rqkey
	)
);

DEFINE_EVENT(ib_mad_send_template, ib_mad_error_handler,
	TP_PROTO(struct ib_mad_send_wr_private *wr,
		 struct ib_mad_qp_info *qp_info),
	TP_ARGS(wr, qp_info));
DEFINE_EVENT(ib_mad_send_template, ib_mad_ib_send_mad,
	TP_PROTO(struct ib_mad_send_wr_private *wr,
		 struct ib_mad_qp_info *qp_info),
	TP_ARGS(wr, qp_info));
DEFINE_EVENT(ib_mad_send_template, ib_mad_send_done_resend,
	TP_PROTO(struct ib_mad_send_wr_private *wr,
		 struct ib_mad_qp_info *qp_info),
	TP_ARGS(wr, qp_info));
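
/*
 * Illustrative usage (a sketch, not part of the original header): the MAD
 * core is expected to fire the send-template events above along the send
 * path, e.g.
 *
 *	trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
 *	trace_ib_mad_send_done_resend(mad_send_wr, qp_info);
 *	trace_ib_mad_error_handler(mad_send_wr, qp_info);
 *
 * where mad_send_wr is the struct ib_mad_send_wr_private being posted and
 * qp_info identifies the MAD QP it is posted on.
 */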

TRACE_EVENT(ib_mad_send_done_handler,
	TP_PROTO(struct ib_mad_send_wr_private *wr, struct ib_wc *wc),
	TP_ARGS(wr, wc),

	TP_STRUCT__entry(
		__field(u8,             port_num)
		__field(u8,             base_version)
		__field(u8,             mgmt_class)
		__field(u8,             class_version)
		__field(u32,            qp_num)
		__field(u64,            wrtid)
		__field(u16,            status)
		__field(u16,            wc_status)
		__field(u32,            length)
		__field(void *,         agent_priv)
		__field(unsigned long,  timeout)
		__field(u32,            dev_index)
		__field(int,            retries_left)
		__field(int,            max_retries)
		__field(int,            retry)
		__field(u8,             method)
	),

	TP_fast_assign(
		__entry->dev_index = wr->mad_agent_priv->agent.device->index;
		__entry->port_num = wr->mad_agent_priv->agent.port_num;
		__entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
		__entry->agent_priv = wr->mad_agent_priv;
		__entry->wrtid = wr->tid;
		__entry->max_retries = wr->max_retries;
		__entry->retries_left = wr->retries_left;
		__entry->retry = wr->retry;
		__entry->timeout = wr->timeout;
		__entry->base_version =
			((struct ib_mad_hdr *)wr->send_buf.mad)->base_version;
		__entry->mgmt_class =
			((struct ib_mad_hdr *)wr->send_buf.mad)->mgmt_class;
		__entry->class_version =
			((struct ib_mad_hdr *)wr->send_buf.mad)->class_version;
		__entry->method =
			((struct ib_mad_hdr *)wr->send_buf.mad)->method;
		__entry->status =
			((struct ib_mad_hdr *)wr->send_buf.mad)->status;
		__entry->wc_status = wc->status;
		__entry->length = wc->byte_len;
	),

	TP_printk("%d:%d QP%d : SEND WC Status %d : agent %p: " \
		  "wrtid 0x%llx %d/%d retries(%d) timeout %lu length %d: " \
		  "hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \
		  "method 0x%x status 0x%x",
		__entry->dev_index, __entry->port_num, __entry->qp_num,
		__entry->wc_status,
		__entry->agent_priv, be64_to_cpu(__entry->wrtid),
		__entry->retries_left, __entry->max_retries,
		__entry->retry, __entry->timeout,
		__entry->length,
		__entry->base_version, __entry->mgmt_class,
		__entry->class_version, __entry->method,
		be16_to_cpu(__entry->status)
	)
);
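
/*
 * Illustrative usage (a sketch): expected to be emitted from the send
 * completion handler with the completed work request and its work
 * completion, e.g.
 *
 *	trace_ib_mad_send_done_handler(mad_send_wr, wc);
 */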

TRACE_EVENT(ib_mad_recv_done_handler,
	TP_PROTO(struct ib_mad_qp_info *qp_info, struct ib_wc *wc,
		 struct ib_mad_hdr *mad_hdr),
	TP_ARGS(qp_info, wc, mad_hdr),

	TP_STRUCT__entry(
		__field(u8,             base_version)
		__field(u8,             mgmt_class)
		__field(u8,             class_version)
		__field(u8,             port_num)
		__field(u32,            qp_num)
		__field(u16,            status)
		__field(u16,            class_specific)
		__field(u32,            length)
		__field(u64,            tid)
		__field(u8,             method)
		__field(u8,             sl)
		__field(u16,            attr_id)
		__field(u32,            attr_mod)
		__field(u16,            src_qp)
		__field(u16,            wc_status)
		__field(u32,            slid)
		__field(u32,            dev_index)
	),

	TP_fast_assign(
		__entry->dev_index = qp_info->port_priv->device->index;
		__entry->port_num = qp_info->port_priv->port_num;
		__entry->qp_num = qp_info->qp->qp_num;
		__entry->length = wc->byte_len;
		__entry->base_version = mad_hdr->base_version;
		__entry->mgmt_class = mad_hdr->mgmt_class;
		__entry->class_version = mad_hdr->class_version;
		__entry->method = mad_hdr->method;
		__entry->status = mad_hdr->status;
		__entry->class_specific = mad_hdr->class_specific;
		__entry->tid = mad_hdr->tid;
		__entry->attr_id = mad_hdr->attr_id;
		__entry->attr_mod = mad_hdr->attr_mod;
		__entry->slid = wc->slid;
		__entry->src_qp = wc->src_qp;
		__entry->sl = wc->sl;
		__entry->wc_status = wc->status;
	),

	TP_printk("%d:%d QP%d : RECV WC Status %d : length %d : hdr : " \
		  "base_ver 0x%02x class 0x%02x class_ver 0x%02x " \
		  "method 0x%02x status 0x%04x class_specific 0x%04x " \
		  "tid 0x%016llx attr_id 0x%04x attr_mod 0x%08x " \
		  "slid 0x%08x src QP%d, sl %d",
		__entry->dev_index, __entry->port_num, __entry->qp_num,
		__entry->wc_status,
		__entry->length,
		__entry->base_version, __entry->mgmt_class,
		__entry->class_version, __entry->method,
		be16_to_cpu(__entry->status),
		be16_to_cpu(__entry->class_specific),
		be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
		be32_to_cpu(__entry->attr_mod),
		__entry->slid, __entry->src_qp, __entry->sl
	)
);
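
/*
 * Illustrative usage (a sketch): expected to be emitted from the receive
 * completion handler once the incoming MAD header has been located in the
 * receive buffer, e.g.
 *
 *	trace_ib_mad_recv_done_handler(qp_info, wc, mad_hdr);
 */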

DECLARE_EVENT_CLASS(ib_mad_agent_template,
	TP_PROTO(struct ib_mad_agent_private *agent),
	TP_ARGS(agent),

	TP_STRUCT__entry(
		__field(u32,            dev_index)
		__field(u32,            hi_tid)
		__field(u8,             port_num)
		__field(u8,             mgmt_class)
		__field(u8,             mgmt_class_version)
	),

	TP_fast_assign(
		__entry->dev_index = agent->agent.device->index;
		__entry->port_num = agent->agent.port_num;
		__entry->hi_tid = agent->agent.hi_tid;

		if (agent->reg_req) {
			__entry->mgmt_class = agent->reg_req->mgmt_class;
			__entry->mgmt_class_version =
				agent->reg_req->mgmt_class_version;
		} else {
			__entry->mgmt_class = 0;
			__entry->mgmt_class_version = 0;
		}
	),

	TP_printk("%d:%d mad agent : hi_tid 0x%08x class 0x%02x class_ver 0x%02x",
		__entry->dev_index, __entry->port_num,
		__entry->hi_tid, __entry->mgmt_class,
		__entry->mgmt_class_version
	)
);
DEFINE_EVENT(ib_mad_agent_template, ib_mad_recv_done_agent,
	TP_PROTO(struct ib_mad_agent_private *agent),
	TP_ARGS(agent));
DEFINE_EVENT(ib_mad_agent_template, ib_mad_send_done_agent,
	TP_PROTO(struct ib_mad_agent_private *agent),
	TP_ARGS(agent));
DEFINE_EVENT(ib_mad_agent_template, ib_mad_create_agent,
	TP_PROTO(struct ib_mad_agent_private *agent),
	TP_ARGS(agent));
DEFINE_EVENT(ib_mad_agent_template, ib_mad_unregister_agent,
	TP_PROTO(struct ib_mad_agent_private *agent),
	TP_ARGS(agent));
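
/*
 * The agent events above share one template keyed on the agent's device
 * index, port, hi_tid and, when a registration request exists, its
 * management class/version.  Illustrative usage (a sketch):
 *
 *	trace_ib_mad_create_agent(mad_agent_priv);
 *	trace_ib_mad_unregister_agent(mad_agent_priv);
 */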
DECLARE_EVENT_CLASS(ib_mad_opa_smi_template,
	TP_PROTO(struct opa_smp *smp),
	TP_ARGS(smp),

	TP_STRUCT__entry(
		__field(u64,            mkey)
		__field(u32,            dr_slid)
		__field(u32,            dr_dlid)
		__field(u8,             hop_ptr)
		__field(u8,             hop_cnt)
		__array(u8,             initial_path, OPA_SMP_MAX_PATH_HOPS)
		__array(u8,             return_path, OPA_SMP_MAX_PATH_HOPS)
	),

	TP_fast_assign(
		__entry->hop_ptr = smp->hop_ptr;
		__entry->hop_cnt = smp->hop_cnt;
		__entry->mkey = smp->mkey;
		__entry->dr_slid = smp->route.dr.dr_slid;
		__entry->dr_dlid = smp->route.dr.dr_dlid;
		memcpy(__entry->initial_path, smp->route.dr.initial_path,
			OPA_SMP_MAX_PATH_HOPS);
		memcpy(__entry->return_path, smp->route.dr.return_path,
			OPA_SMP_MAX_PATH_HOPS);
	),

	TP_printk("OPA SMP: hop_ptr %d hop_cnt %d " \
		  "mkey 0x%016llx dr_slid 0x%08x dr_dlid 0x%08x " \
		  "initial_path %*ph return_path %*ph ",
		__entry->hop_ptr, __entry->hop_cnt,
		be64_to_cpu(__entry->mkey), be32_to_cpu(__entry->dr_slid),
		be32_to_cpu(__entry->dr_dlid),
		OPA_SMP_MAX_PATH_HOPS, __entry->initial_path,
		OPA_SMP_MAX_PATH_HOPS, __entry->return_path
	)
);

DEFINE_EVENT(ib_mad_opa_smi_template, ib_mad_handle_opa_smi,
	TP_PROTO(struct opa_smp *smp),
	TP_ARGS(smp));
DEFINE_EVENT(ib_mad_opa_smi_template, ib_mad_handle_out_opa_smi,
	TP_PROTO(struct opa_smp *smp),
	TP_ARGS(smp));
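
/*
 * Illustrative usage (a sketch): the OPA SMI events are expected to be
 * emitted while processing directed-route OPA SMPs, e.g.
 *
 *	trace_ib_mad_handle_opa_smi(smp);
 *	trace_ib_mad_handle_out_opa_smi(smp);
 */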

DECLARE_EVENT_CLASS(ib_mad_opa_ib_template,
	TP_PROTO(struct ib_smp *smp),
	TP_ARGS(smp),

	TP_STRUCT__entry(
		__field(u64,            mkey)
		__field(u32,            dr_slid)
		__field(u32,            dr_dlid)
		__field(u8,             hop_ptr)
		__field(u8,             hop_cnt)
		__array(u8,             initial_path, IB_SMP_MAX_PATH_HOPS)
		__array(u8,             return_path, IB_SMP_MAX_PATH_HOPS)
	),

	TP_fast_assign(
		__entry->hop_ptr = smp->hop_ptr;
		__entry->hop_cnt = smp->hop_cnt;
		__entry->mkey = smp->mkey;
		__entry->dr_slid = smp->dr_slid;
		__entry->dr_dlid = smp->dr_dlid;
		memcpy(__entry->initial_path, smp->initial_path,
			IB_SMP_MAX_PATH_HOPS);
		memcpy(__entry->return_path, smp->return_path,
			IB_SMP_MAX_PATH_HOPS);
	),

	TP_printk("IB SMP: hop_ptr %d hop_cnt %d " \
		  "mkey 0x%016llx dr_slid 0x%04x dr_dlid 0x%04x " \
		  "initial_path %*ph return_path %*ph ",
		__entry->hop_ptr, __entry->hop_cnt,
		be64_to_cpu(__entry->mkey), be16_to_cpu(__entry->dr_slid),
		be16_to_cpu(__entry->dr_dlid),
		IB_SMP_MAX_PATH_HOPS, __entry->initial_path,
		IB_SMP_MAX_PATH_HOPS, __entry->return_path
	)
);

DEFINE_EVENT(ib_mad_opa_ib_template, ib_mad_handle_ib_smi,
	TP_PROTO(struct ib_smp *smp),
	TP_ARGS(smp));
DEFINE_EVENT(ib_mad_opa_ib_template, ib_mad_handle_out_ib_smi,
	TP_PROTO(struct ib_smp *smp),
	TP_ARGS(smp));
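
/*
 * Illustrative usage (a sketch): the IB SMI events mirror the OPA ones for
 * plain IB directed-route SMPs, e.g.
 *
 *	trace_ib_mad_handle_ib_smi(smp);
 *	trace_ib_mad_handle_out_ib_smi(smp);
 */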

#endif /* _TRACE_IB_MAD_H */

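/* This part must be outside protection */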
#include <trace/define_trace.h>