/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Intel IOMMU trace support
 *
 * Copyright (C) 2019 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM intel_iommu

#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_IOMMU_H

#include <linux/tracepoint.h>

#include "iommu.h"

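/*
 * Size of the per-event scratch buffer that decode_prq_descriptor() formats
 * a page request descriptor into (see prq_report below).
 */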
#define MSG_MAX		256

TRACE_EVENT(qi_submit,
	TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3),

	TP_ARGS(iommu, qw0, qw1, qw2, qw3),

	TP_STRUCT__entry(
		__field(u64, qw0)
		__field(u64, qw1)
		__field(u64, qw2)
		__field(u64, qw3)
		__string(iommu, iommu->name)
	),

	TP_fast_assign(
		__assign_str(iommu);
		__entry->qw0 = qw0;
		__entry->qw1 = qw1;
		__entry->qw2 = qw2;
		__entry->qw3 = qw3;
	),

	TP_printk("%s %s: 0x%llx 0x%llx 0x%llx 0x%llx",
		  __print_symbolic(__entry->qw0 & 0xf,
				   { QI_CC_TYPE,	"cc_inv" },
				   { QI_IOTLB_TYPE,	"iotlb_inv" },
				   { QI_DIOTLB_TYPE,	"dev_tlb_inv" },
				   { QI_IEC_TYPE,	"iec_inv" },
				   { QI_IWD_TYPE,	"inv_wait" },
				   { QI_EIOTLB_TYPE,	"p_iotlb_inv" },
				   { QI_PC_TYPE,	"pc_inv" },
				   { QI_DEIOTLB_TYPE,	"p_dev_tlb_inv" },
				   { QI_PGRP_RESP_TYPE,	"page_grp_resp" }),
		__get_str(iommu),
		__entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3
	)
);
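
/*
 * Usage sketch (illustrative, not defined here): the queued-invalidation
 * submit path is expected to emit this event once per descriptor written to
 * the invalidation queue, for example:
 *
 *	trace_qi_submit(iommu, desc->qw0, desc->qw1, desc->qw2, desc->qw3);
 *
 * The low four bits of qw0 carry the descriptor type, which TP_printk()
 * above decodes symbolically (cc_inv, iotlb_inv, inv_wait, ...).
 */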

TRACE_EVENT(prq_report,
	TP_PROTO(struct intel_iommu *iommu, struct device *dev,
		 u64 dw0, u64 dw1, u64 dw2, u64 dw3,
		 unsigned long seq),

	TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq),

	TP_STRUCT__entry(
		__field(u64, dw0)
		__field(u64, dw1)
		__field(u64, dw2)
		__field(u64, dw3)
		__field(unsigned long, seq)
		__string(iommu, iommu->name)
		__string(dev, dev_name(dev))
		__dynamic_array(char, buff, MSG_MAX)
	),

	TP_fast_assign(
		__entry->dw0 = dw0;
		__entry->dw1 = dw1;
		__entry->dw2 = dw2;
		__entry->dw3 = dw3;
		__entry->seq = seq;
		__assign_str(iommu);
		__assign_str(dev);
	),

	TP_printk("%s/%s seq# %ld: %s",
		__get_str(iommu), __get_str(dev), __entry->seq,
		decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0,
				      __entry->dw1, __entry->dw2, __entry->dw3)
	)
);
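
/*
 * Usage sketch (illustrative): the page request queue handler is expected to
 * report each page request descriptor it dequeues, passing the four raw
 * quadwords plus a sequence number, for example:
 *
 *	trace_prq_report(iommu, dev, dw0, dw1, dw2, dw3, seq);
 *
 * decode_prq_descriptor() then renders the quadwords into the MSG_MAX-byte
 * "buff" array when the event is printed.
 */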

DECLARE_EVENT_CLASS(cache_tag_log,
	TP_PROTO(struct cache_tag *tag),
	TP_ARGS(tag),
	TP_STRUCT__entry(
		__string(iommu, tag->iommu->name)
		__string(dev, dev_name(tag->dev))
		__field(u16, type)
		__field(u16, domain_id)
		__field(u32, pasid)
		__field(u32, users)
	),
	TP_fast_assign(
		__assign_str(iommu);
		__assign_str(dev);
		__entry->type = tag->type;
		__entry->domain_id = tag->domain_id;
		__entry->pasid = tag->pasid;
		__entry->users = tag->users;
	),
	TP_printk("%s/%s type %s did %d pasid %d ref %d",
		  __get_str(iommu), __get_str(dev),
		  __print_symbolic(__entry->type,
			{ CACHE_TAG_IOTLB,		"iotlb" },
			{ CACHE_TAG_DEVTLB,		"devtlb" },
			{ CACHE_TAG_NESTING_IOTLB,	"nesting_iotlb" },
			{ CACHE_TAG_NESTING_DEVTLB,	"nesting_devtlb" }),
		__entry->domain_id, __entry->pasid, __entry->users
	)
);

DEFINE_EVENT(cache_tag_log, cache_tag_assign,
	TP_PROTO(struct cache_tag *tag),
	TP_ARGS(tag)
);

DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
	TP_PROTO(struct cache_tag *tag),
	TP_ARGS(tag)
);

DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
	TP_PROTO(struct cache_tag *tag),
	TP_ARGS(tag)
);
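
/*
 * Usage sketch (illustrative): the cache tag management code is expected to
 * emit these events when a tag is attached to or detached from a device, and
 * when everything covered by a tag is invalidated, for example:
 *
 *	trace_cache_tag_assign(tag);
 *	trace_cache_tag_unassign(tag);
 *	trace_cache_tag_flush_all(tag);
 *
 * All three events share the cache_tag_log class, so they record and print
 * the same fields.
 */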

DECLARE_EVENT_CLASS(cache_tag_flush,
	TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
		 unsigned long addr, unsigned long pages, unsigned long mask),
	TP_ARGS(tag, start, end, addr, pages, mask),
	TP_STRUCT__entry(
		__string(iommu, tag->iommu->name)
		__string(dev, dev_name(tag->dev))
		__field(u16, type)
		__field(u16, domain_id)
		__field(u32, pasid)
		__field(unsigned long, start)
		__field(unsigned long, end)
		__field(unsigned long, addr)
		__field(unsigned long, pages)
		__field(unsigned long, mask)
	),
	TP_fast_assign(
		__assign_str(iommu);
		__assign_str(dev);
		__entry->type = tag->type;
		__entry->domain_id = tag->domain_id;
		__entry->pasid = tag->pasid;
		__entry->start = start;
		__entry->end = end;
		__entry->addr = addr;
		__entry->pages = pages;
		__entry->mask = mask;
	),
	TP_printk("%s %s[%d] type %s did %d [0x%lx-0x%lx] addr 0x%lx pages 0x%lx mask 0x%lx",
		  __get_str(iommu), __get_str(dev), __entry->pasid,
		  __print_symbolic(__entry->type,
			{ CACHE_TAG_IOTLB,		"iotlb" },
			{ CACHE_TAG_DEVTLB,		"devtlb" },
			{ CACHE_TAG_NESTING_IOTLB,	"nesting_iotlb" },
			{ CACHE_TAG_NESTING_DEVTLB,	"nesting_devtlb" }),
		__entry->domain_id, __entry->start, __entry->end,
		__entry->addr, __entry->pages, __entry->mask
	)
);

DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range,
	TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
		 unsigned long addr, unsigned long pages, unsigned long mask),
	TP_ARGS(tag, start, end, addr, pages, mask)
);

DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range_np,
	TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
		 unsigned long addr, unsigned long pages, unsigned long mask),
	TP_ARGS(tag, start, end, addr, pages, mask)
);
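
/*
 * Usage sketch (illustrative): range-based invalidation paths are expected
 * to emit one of these events per cache tag they walk, recording both the
 * requested [start, end] range and the addr/pages/mask actually used for
 * the hardware invalidation, for example:
 *
 *	trace_cache_tag_flush_range(tag, start, end, addr, pages, mask);
 *
 * cache_tag_flush_range_np presumably covers the not-present (map-time)
 * flush variant of the same class.
 */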
#endif /* _TRACE_INTEL_IOMMU_H */

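/*
 * Usage note: as with other tracepoint headers, exactly one translation unit
 * is expected to define CREATE_TRACE_POINTS before including this header so
 * that define_trace.h below emits the tracepoint definitions, e.g.:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "trace.h"
 */
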
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/iommu/intel/
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>