xref: /linux/include/trace/events/dma.h (revision dfecb0c5af3b07ebfa84be63a7a21bfc9e29a872)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM dma
4 
5 #if !defined(_TRACE_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_DMA_H
7 
8 #include <linux/tracepoint.h>
9 #include <linux/dma-direction.h>
10 #include <linux/dma-mapping.h>
11 #include <trace/events/mmflags.h>
12 
/*
 * Export the dma_data_direction enumerators to the trace event format
 * metadata so user-space tooling can resolve the symbolic names used by
 * __print_symbolic() below.
 */
13 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
14 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
15 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
16 TRACE_DEFINE_ENUM(DMA_NONE);
17 
/*
 * decode_dma_data_direction - print an enum dma_data_direction value as a
 * human-readable string ("BIDIRECTIONAL", "TO_DEVICE", "FROM_DEVICE" or
 * "NONE") in the trace output.
 */
18 #define decode_dma_data_direction(dir) \
19 	__print_symbolic(dir, \
20 		{ DMA_BIDIRECTIONAL, "BIDIRECTIONAL" }, \
21 		{ DMA_TO_DEVICE, "TO_DEVICE" }, \
22 		{ DMA_FROM_DEVICE, "FROM_DEVICE" }, \
23 		{ DMA_NONE, "NONE" })
24 
/*
 * decode_dma_attrs - print a DMA_ATTR_* bitmask as a "|"-separated list of
 * flag names.
 *
 * NOTE(review): DMA_ATTR_DEBUGGING_IGNORE_CACHELINES is printed as
 * "CACHELINES_OVERLAP", which does not match the attribute's name the way
 * every other entry here does - confirm the label is intentional.
 */
25 #define decode_dma_attrs(attrs) \
26 	__print_flags(attrs, "|", \
27 		{ DMA_ATTR_WEAK_ORDERING, "WEAK_ORDERING" }, \
28 		{ DMA_ATTR_WRITE_COMBINE, "WRITE_COMBINE" }, \
29 		{ DMA_ATTR_NO_KERNEL_MAPPING, "NO_KERNEL_MAPPING" }, \
30 		{ DMA_ATTR_SKIP_CPU_SYNC, "SKIP_CPU_SYNC" }, \
31 		{ DMA_ATTR_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS" }, \
32 		{ DMA_ATTR_ALLOC_SINGLE_PAGES, "ALLOC_SINGLE_PAGES" }, \
33 		{ DMA_ATTR_NO_WARN, "NO_WARN" }, \
34 		{ DMA_ATTR_PRIVILEGED, "PRIVILEGED" }, \
35 		{ DMA_ATTR_MMIO, "MMIO" }, \
36 		{ DMA_ATTR_DEBUGGING_IGNORE_CACHELINES, "CACHELINES_OVERLAP" }, \
37 		{ DMA_ATTR_REQUIRE_COHERENT, "REQUIRE_COHERENT" })
38 
/*
 * dma_map - event class for single-range DMA map operations.
 *
 * Captures the device name, the CPU physical address and the resulting DMA
 * (bus) address of the mapped range, its size, the transfer direction and
 * the DMA_ATTR_* attribute mask.
 */
39 DECLARE_EVENT_CLASS(dma_map,
40 	TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
41 		 size_t size, enum dma_data_direction dir, unsigned long attrs),
42 	TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs),
43 
44 	TP_STRUCT__entry(
45 		__string(device, dev_name(dev))
		/* phys_addr_t/dma_addr_t stored widened to u64 so the record
		 * layout does not vary with the arch's address-size config */
46 		__field(u64, phys_addr)
47 		__field(u64, dma_addr)
48 		__field(size_t, size)
49 		__field(enum dma_data_direction, dir)
50 		__field(unsigned long, attrs)
51 	),
52 
53 	TP_fast_assign(
54 		__assign_str(device);
55 		__entry->phys_addr = phys_addr;
56 		__entry->dma_addr = dma_addr;
57 		__entry->size = size;
58 		__entry->dir = dir;
59 		__entry->attrs = attrs;
60 	),
61 
62 	TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addr=%llx attrs=%s",
63 		__get_str(device),
64 		decode_dma_data_direction(__entry->dir),
65 		__entry->dma_addr,
66 		__entry->size,
67 		__entry->phys_addr,
68 		decode_dma_attrs(__entry->attrs))
69 );
70 
/* Stamp out a concrete event sharing the dma_map class's format/assign. */
71 #define DEFINE_MAP_EVENT(name) \
72 DEFINE_EVENT(dma_map, name, \
73 	TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr, \
74 		 size_t size, enum dma_data_direction dir, unsigned long attrs), \
75 	TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs))
76 
77 DEFINE_MAP_EVENT(dma_map_phys);
78 
/*
 * dma_unmap - event class for single-range DMA unmap operations.
 *
 * Mirrors the dma_map class above, but only the DMA (bus) address is
 * available at unmap time, so no phys_addr field is recorded.
 */
79 DECLARE_EVENT_CLASS(dma_unmap,
80 	TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
81 		 enum dma_data_direction dir, unsigned long attrs),
82 	TP_ARGS(dev, addr, size, dir, attrs),
83 
84 	TP_STRUCT__entry(
85 		__string(device, dev_name(dev))
		/* dma_addr_t widened to u64 for an arch-independent record */
86 		__field(u64, addr)
87 		__field(size_t, size)
88 		__field(enum dma_data_direction, dir)
89 		__field(unsigned long, attrs)
90 	),
91 
92 	TP_fast_assign(
93 		__assign_str(device);
94 		__entry->addr = addr;
95 		__entry->size = size;
96 		__entry->dir = dir;
97 		__entry->attrs = attrs;
98 	),
99 
100 	TP_printk("%s dir=%s dma_addr=%llx size=%zu attrs=%s",
101 		__get_str(device),
102 		decode_dma_data_direction(__entry->dir),
103 		__entry->addr,
104 		__entry->size,
105 		decode_dma_attrs(__entry->attrs))
106 );
107 
/* Stamp out a concrete event sharing the dma_unmap class's format/assign. */
108 #define DEFINE_UNMAP_EVENT(name) \
109 DEFINE_EVENT(dma_unmap, name, \
110 	TP_PROTO(struct device *dev, dma_addr_t addr, size_t size, \
111 		 enum dma_data_direction dir, unsigned long attrs), \
112 	TP_ARGS(dev, addr, size, dir, attrs))
113 
114 DEFINE_UNMAP_EVENT(dma_unmap_phys);
115 
/*
 * dma_alloc_class - event class for coherent/page DMA allocations.
 *
 * Records the kernel virtual address and DMA address of the allocation,
 * its size, direction, the GFP flags used for the allocation and the
 * DMA_ATTR_* mask.  Also reused by dma_alloc_sgt_err for failed sg-table
 * allocations (see DEFINE_ALLOC_EVENT below).
 */
116 DECLARE_EVENT_CLASS(dma_alloc_class,
117 	TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
118 		 size_t size, enum dma_data_direction dir, gfp_t flags,
119 		 unsigned long attrs),
120 	TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs),
121 
122 	TP_STRUCT__entry(
123 		__string(device, dev_name(dev))
124 		__field(void *, virt_addr)
125 		__field(u64, dma_addr)
126 		__field(size_t, size)
127 		__field(gfp_t, flags)
128 		__field(enum dma_data_direction, dir)
129 		__field(unsigned long, attrs)
130 	),
131 
132 	TP_fast_assign(
133 		__assign_str(device);
134 		__entry->virt_addr = virt_addr;
135 		__entry->dma_addr = dma_addr;
136 		__entry->size = size;
137 		__entry->flags = flags;
138 		__entry->dir = dir;
139 		__entry->attrs = attrs;
140 	),
141 
142 	TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p flags=%s attrs=%s",
143 		__get_str(device),
144 		decode_dma_data_direction(__entry->dir),
145 		__entry->dma_addr,
146 		__entry->size,
147 		__entry->virt_addr,
148 		show_gfp_flags(__entry->flags),
149 		decode_dma_attrs(__entry->attrs))
150 );
151 
/* Stamp out a concrete event sharing dma_alloc_class's format/assign. */
152 #define DEFINE_ALLOC_EVENT(name) \
153 DEFINE_EVENT(dma_alloc_class, name, \
154 	TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
155 		 size_t size, enum dma_data_direction dir, gfp_t flags, \
156 		 unsigned long attrs), \
157 	TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs))
158 
159 DEFINE_ALLOC_EVENT(dma_alloc);
160 DEFINE_ALLOC_EVENT(dma_alloc_pages);
161 DEFINE_ALLOC_EVENT(dma_alloc_sgt_err);
162 
/*
 * dma_alloc_sgt - successful non-contiguous (sg_table backed) allocation.
 *
 * Records the physical address of every scatterlist entry in a per-event
 * dynamic array sized by sgt->orig_nents.  Only the DMA address of the
 * first entry (sg_dma_address(sgt->sgl)) is recorded.
 */
163 TRACE_EVENT(dma_alloc_sgt,
164 	TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
165 		 enum dma_data_direction dir, gfp_t flags, unsigned long attrs),
166 	TP_ARGS(dev, sgt, size, dir, flags, attrs),
167 
168 	TP_STRUCT__entry(
169 		__string(device, dev_name(dev))
170 		__dynamic_array(u64, phys_addrs, sgt->orig_nents)
171 		__field(u64, dma_addr)
172 		__field(size_t, size)
173 		__field(enum dma_data_direction, dir)
174 		__field(gfp_t, flags)
175 		__field(unsigned long, attrs)
176 	),
177 
178 	TP_fast_assign(
179 		struct scatterlist *sg;
180 		int i;
181 
182 		__assign_str(device);
183 		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
184 			((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
185 		__entry->dma_addr = sg_dma_address(sgt->sgl);
186 		__entry->size = size;
187 		__entry->dir = dir;
188 		__entry->flags = flags;
189 		__entry->attrs = attrs;
190 	),
191 
192 	TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s flags=%s attrs=%s",
193 		__get_str(device),
194 		decode_dma_data_direction(__entry->dir),
195 		__entry->dma_addr,
196 		__entry->size,
		/* element count = recorded byte length / element size */
197 		__print_array(__get_dynamic_array(phys_addrs),
198 			      __get_dynamic_array_len(phys_addrs) /
199 				sizeof(u64), sizeof(u64)),
200 		show_gfp_flags(__entry->flags),
201 		decode_dma_attrs(__entry->attrs))
202 );
203 
/*
 * dma_free_class - event class for coherent/page DMA frees.
 *
 * Counterpart of dma_alloc_class without the GFP flags (no allocation
 * happens on this path).
 */
204 DECLARE_EVENT_CLASS(dma_free_class,
205 	TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
206 		 size_t size, enum dma_data_direction dir, unsigned long attrs),
207 	TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs),
208 
209 	TP_STRUCT__entry(
210 		__string(device, dev_name(dev))
211 		__field(void *, virt_addr)
212 		__field(u64, dma_addr)
213 		__field(size_t, size)
214 		__field(enum dma_data_direction, dir)
215 		__field(unsigned long, attrs)
216 	),
217 
218 	TP_fast_assign(
219 		__assign_str(device);
220 		__entry->virt_addr = virt_addr;
221 		__entry->dma_addr = dma_addr;
222 		__entry->size = size;
223 		__entry->dir = dir;
224 		__entry->attrs = attrs;
225 	),
226 
227 	TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p attrs=%s",
228 		__get_str(device),
229 		decode_dma_data_direction(__entry->dir),
230 		__entry->dma_addr,
231 		__entry->size,
232 		__entry->virt_addr,
233 		decode_dma_attrs(__entry->attrs))
234 );
235 
/* Stamp out a concrete event sharing dma_free_class's format/assign. */
236 #define DEFINE_FREE_EVENT(name) \
237 DEFINE_EVENT(dma_free_class, name, \
238 	TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
239 		 size_t size, enum dma_data_direction dir, unsigned long attrs), \
240 	TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs))
241 
242 DEFINE_FREE_EVENT(dma_free);
243 DEFINE_FREE_EVENT(dma_free_pages);
244 
/*
 * dma_free_sgt - free of a non-contiguous (sg_table backed) allocation.
 *
 * Mirrors dma_alloc_sgt: per-entry physical addresses plus the DMA address
 * of the first scatterlist entry.
 */
245 TRACE_EVENT(dma_free_sgt,
246 	TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
247 		 enum dma_data_direction dir),
248 	TP_ARGS(dev, sgt, size, dir),
249 
250 	TP_STRUCT__entry(
251 		__string(device, dev_name(dev))
252 		__dynamic_array(u64, phys_addrs, sgt->orig_nents)
253 		__field(u64, dma_addr)
254 		__field(size_t, size)
255 		__field(enum dma_data_direction, dir)
256 	),
257 
258 	TP_fast_assign(
259 		struct scatterlist *sg;
260 		int i;
261 
262 		__assign_str(device);
263 		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
264 			((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
265 		__entry->dma_addr = sg_dma_address(sgt->sgl);
266 		__entry->size = size;
267 		__entry->dir = dir;
268 	),
269 
270 	TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s",
271 		__get_str(device),
272 		decode_dma_data_direction(__entry->dir),
273 		__entry->dma_addr,
274 		__entry->size,
		/* element count = recorded byte length / element size */
275 		__print_array(__get_dynamic_array(phys_addrs),
276 			      __get_dynamic_array_len(phys_addrs) /
277 				sizeof(u64), sizeof(u64)))
278 );
279 
/* Upper bound on per-event array entries, to cap trace record size. */
280 #define DMA_TRACE_MAX_ENTRIES 128
281 
/*
 * dma_map_sg - scatterlist map event.
 *
 * @nents: number of entries in the source scatterlist.
 * @ents:  number of DMA segments produced by the mapping (may be smaller
 *         than @nents if entries were merged).
 *
 * Each recorded array is clamped to DMA_TRACE_MAX_ENTRIES; the full counts
 * are kept in full_nents/full_ents and the truncated flag marks events
 * where either count exceeded the cap.
 */
282 TRACE_EVENT(dma_map_sg,
283 	TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
284 		 int ents, enum dma_data_direction dir, unsigned long attrs),
285 	TP_ARGS(dev, sgl, nents, ents, dir, attrs),
286 
287 	TP_STRUCT__entry(
288 		__string(device, dev_name(dev))
289 		__field(int, full_nents)
290 		__field(int, full_ents)
291 		__field(bool, truncated)
292 		__dynamic_array(u64, phys_addrs,  min(nents, DMA_TRACE_MAX_ENTRIES))
293 		__dynamic_array(u64, dma_addrs, min(ents, DMA_TRACE_MAX_ENTRIES))
294 		__dynamic_array(unsigned int, lengths, min(ents, DMA_TRACE_MAX_ENTRIES))
295 		__field(enum dma_data_direction, dir)
296 		__field(unsigned long, attrs)
297 	),
298 
299 	TP_fast_assign(
300 		struct scatterlist *sg;
301 		int i;
		/* Must match the min() expressions used to size the arrays. */
302 		int traced_nents = min_t(int, nents, DMA_TRACE_MAX_ENTRIES);
303 		int traced_ents = min_t(int, ents, DMA_TRACE_MAX_ENTRIES);
304 
305 		__assign_str(device);
306 		__entry->full_nents = nents;
307 		__entry->full_ents = ents;
308 		__entry->truncated = (nents > DMA_TRACE_MAX_ENTRIES) || (ents > DMA_TRACE_MAX_ENTRIES);
		/* First pass: CPU physical address of each source entry. */
309 		for_each_sg(sgl, sg, traced_nents, i)
310 			((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
		/* Second pass: DMA address and length of each mapped segment. */
311 		for_each_sg(sgl, sg, traced_ents, i) {
312 			((u64 *)__get_dynamic_array(dma_addrs))[i] =
313 				sg_dma_address(sg);
314 			((unsigned int *)__get_dynamic_array(lengths))[i] =
315 				sg_dma_len(sg);
316 		}
317 		__entry->dir = dir;
318 		__entry->attrs = attrs;
319 	),
320 
	/* "traced/full" counts are shown as e.g. nents=128/300 [TRUNCATED]. */
321 	TP_printk("%s dir=%s nents=%d/%d ents=%d/%d%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s",
322 		__get_str(device),
323 		decode_dma_data_direction(__entry->dir),
324 		min_t(int, __entry->full_nents, DMA_TRACE_MAX_ENTRIES), __entry->full_nents,
325 		min_t(int, __entry->full_ents, DMA_TRACE_MAX_ENTRIES), __entry->full_ents,
326 		__entry->truncated ? " [TRUNCATED]" : "",
327 		__print_array(__get_dynamic_array(dma_addrs),
328 			      __get_dynamic_array_len(dma_addrs) /
329 				sizeof(u64), sizeof(u64)),
330 		__print_array(__get_dynamic_array(lengths),
331 			      __get_dynamic_array_len(lengths) /
332 				sizeof(unsigned int), sizeof(unsigned int)),
333 		__print_array(__get_dynamic_array(phys_addrs),
334 			      __get_dynamic_array_len(phys_addrs) /
335 				sizeof(u64), sizeof(u64)),
336 		decode_dma_attrs(__entry->attrs))
337 );
338 
/*
 * dma_map_sg_err - emitted when mapping a scatterlist fails.
 *
 * Records the CPU physical address of every entry the caller asked to map
 * (sg_phys() of each scatterlist entry) together with the error code,
 * transfer direction and DMA_ATTR_* mask.  No DMA addresses exist on this
 * path since the mapping failed.
 *
 * NOTE(review): unlike dma_map_sg above, the phys_addrs array here is not
 * capped at DMA_TRACE_MAX_ENTRIES - confirm whether a very large nents is
 * a concern on this error path.
 */
339 TRACE_EVENT(dma_map_sg_err,
340 	TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
341 		 int err, enum dma_data_direction dir, unsigned long attrs),
342 	TP_ARGS(dev, sgl, nents, err, dir, attrs),
343 
344 	TP_STRUCT__entry(
345 		__string(device, dev_name(dev))
346 		__dynamic_array(u64, phys_addrs, nents)
347 		__field(int, err)
348 		__field(enum dma_data_direction, dir)
349 		__field(unsigned long, attrs)
350 	),
351 
352 	TP_fast_assign(
353 		struct scatterlist *sg;
354 		int i;
355 
356 		__assign_str(device);
357 		for_each_sg(sgl, sg, nents, i)
358 			((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
359 		__entry->err = err;
360 		__entry->dir = dir;
361 		__entry->attrs = attrs;
362 	),
363 
	/*
	 * Fix: the array holds physical addresses (filled from sg_phys()
	 * above), but the label previously read "dma_addrs".  Print it as
	 * "phys_addrs" to match the data and the other sg events.
	 */
364 	TP_printk("%s dir=%s phys_addrs=%s err=%d attrs=%s",
365 		__get_str(device),
366 		decode_dma_data_direction(__entry->dir),
367 		__print_array(__get_dynamic_array(phys_addrs),
368 			      __get_dynamic_array_len(phys_addrs) /
369 				sizeof(u64), sizeof(u64)),
370 		__entry->err,
371 		decode_dma_attrs(__entry->attrs))
372 );
373 
/*
 * dma_unmap_sg - scatterlist unmap event.
 *
 * Note: despite the field name "addrs", the array is filled from
 * sg_phys(), i.e. it holds CPU physical addresses - which matches the
 * "phys_addrs=" label in the output below.
 */
374 TRACE_EVENT(dma_unmap_sg,
375 	TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
376 		 enum dma_data_direction dir, unsigned long attrs),
377 	TP_ARGS(dev, sgl, nents, dir, attrs),
378 
379 	TP_STRUCT__entry(
380 		__string(device, dev_name(dev))
381 		__dynamic_array(u64, addrs, nents)
382 		__field(enum dma_data_direction, dir)
383 		__field(unsigned long, attrs)
384 	),
385 
386 	TP_fast_assign(
387 		struct scatterlist *sg;
388 		int i;
389 
390 		__assign_str(device);
391 		for_each_sg(sgl, sg, nents, i)
392 			((u64 *)__get_dynamic_array(addrs))[i] = sg_phys(sg);
393 		__entry->dir = dir;
394 		__entry->attrs = attrs;
395 	),
396 
397 	TP_printk("%s dir=%s phys_addrs=%s attrs=%s",
398 		__get_str(device),
399 		decode_dma_data_direction(__entry->dir),
400 		__print_array(__get_dynamic_array(addrs),
401 			      __get_dynamic_array_len(addrs) /
402 				sizeof(u64), sizeof(u64)),
403 		decode_dma_attrs(__entry->attrs))
404 );
405 
/*
 * dma_sync_single - event class for single-range CPU<->device cache sync
 * operations; records the DMA address, size and direction.
 */
406 DECLARE_EVENT_CLASS(dma_sync_single,
407 	TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
408 		 enum dma_data_direction dir),
409 	TP_ARGS(dev, dma_addr, size, dir),
410 
411 	TP_STRUCT__entry(
412 		__string(device, dev_name(dev))
413 		__field(u64, dma_addr)
414 		__field(size_t, size)
415 		__field(enum dma_data_direction, dir)
416 	),
417 
418 	TP_fast_assign(
419 		__assign_str(device);
420 		__entry->dma_addr = dma_addr;
421 		__entry->size = size;
422 		__entry->dir = dir;
423 	),
424 
425 	TP_printk("%s dir=%s dma_addr=%llx size=%zu",
426 		__get_str(device),
427 		decode_dma_data_direction(__entry->dir),
428 		__entry->dma_addr,
429 		__entry->size)
430 );
431 
/* Stamp out a concrete event sharing dma_sync_single's format/assign. */
432 #define DEFINE_SYNC_SINGLE_EVENT(name) \
433 DEFINE_EVENT(dma_sync_single, name, \
434 	TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size, \
435 		 enum dma_data_direction dir), \
436 	TP_ARGS(dev, dma_addr, size, dir))
437 
438 DEFINE_SYNC_SINGLE_EVENT(dma_sync_single_for_cpu);
439 DEFINE_SYNC_SINGLE_EVENT(dma_sync_single_for_device);
440 
/*
 * dma_sync_sg - event class for scatterlist CPU<->device cache sync
 * operations; records the DMA address and DMA length of every entry.
 */
441 DECLARE_EVENT_CLASS(dma_sync_sg,
442 	TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
443 		 enum dma_data_direction dir),
444 	TP_ARGS(dev, sgl, nents, dir),
445 
446 	TP_STRUCT__entry(
447 		__string(device, dev_name(dev))
448 		__dynamic_array(u64, dma_addrs, nents)
449 		__dynamic_array(unsigned int, lengths, nents)
450 		__field(enum dma_data_direction, dir)
451 	),
452 
453 	TP_fast_assign(
454 		struct scatterlist *sg;
455 		int i;
456 
457 		__assign_str(device);
458 		for_each_sg(sgl, sg, nents, i) {
459 			((u64 *)__get_dynamic_array(dma_addrs))[i] =
460 				sg_dma_address(sg);
461 			((unsigned int *)__get_dynamic_array(lengths))[i] =
462 				sg_dma_len(sg);
463 		}
464 		__entry->dir = dir;
465 	),
466 
467 	TP_printk("%s dir=%s dma_addrs=%s sizes=%s",
468 		__get_str(device),
469 		decode_dma_data_direction(__entry->dir),
		/* element count = recorded byte length / element size */
470 		__print_array(__get_dynamic_array(dma_addrs),
471 			      __get_dynamic_array_len(dma_addrs) /
472 				sizeof(u64), sizeof(u64)),
473 		__print_array(__get_dynamic_array(lengths),
474 			      __get_dynamic_array_len(lengths) /
475 				sizeof(unsigned int), sizeof(unsigned int)))
476 );
477 
/* Stamp out a concrete event sharing dma_sync_sg's format/assign. */
478 #define DEFINE_SYNC_SG_EVENT(name) \
479 DEFINE_EVENT(dma_sync_sg, name, \
480 	TP_PROTO(struct device *dev, struct scatterlist *sg, int nents, \
481 		 enum dma_data_direction dir), \
482 	TP_ARGS(dev, sg, nents, dir))
483 
484 DEFINE_SYNC_SG_EVENT(dma_sync_sg_for_cpu);
485 DEFINE_SYNC_SG_EVENT(dma_sync_sg_for_device);
486 
487 #endif /*  _TRACE_DMA_H */
488 
489 /* This part must be outside protection */
490 #include <trace/define_trace.h>
491