xref: /linux/drivers/iommu/intel/trace.h (revision 22c55fb9eb92395d999b8404d73e58540d11bdd8)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Intel IOMMU trace support
4  *
5  * Copyright (C) 2019 Intel Corporation
6  *
7  * Author: Lu Baolu <baolu.lu@linux.intel.com>
8  */
9 #undef TRACE_SYSTEM
10 #define TRACE_SYSTEM intel_iommu
11 
12 #if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
13 #define _TRACE_INTEL_IOMMU_H
14 
15 #include <linux/tracepoint.h>
16 
17 #include "iommu.h"
18 
19 #define MSG_MAX		256
20 
/*
 * qi_submit - trace a descriptor submitted to an IOMMU invalidation
 * queue (QI).
 *
 * Records the four raw 64-bit quadwords of the descriptor plus the name
 * of the IOMMU it was queued on.  At print time the low 4 bits of qw0
 * (the descriptor type field) are decoded into a symbolic request name;
 * the full quadwords are still dumped in hex for offline decoding.
 */
21 TRACE_EVENT(qi_submit,
22 	TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3),
23 
24 	TP_ARGS(iommu, qw0, qw1, qw2, qw3),
25 
26 	TP_STRUCT__entry(
27 		__field(u64, qw0)
28 		__field(u64, qw1)
29 		__field(u64, qw2)
30 		__field(u64, qw3)
31 		__string(iommu, iommu->name)
32 	),
33 
34 	TP_fast_assign(
35 		__assign_str(iommu);
36 		__entry->qw0 = qw0;
37 		__entry->qw1 = qw1;
38 		__entry->qw2 = qw2;
39 		__entry->qw3 = qw3;
40 	),
41 
	/* Descriptor type lives in qw0 bits [3:0]; map it to a name. */
42 	TP_printk("%s %s: 0x%llx 0x%llx 0x%llx 0x%llx",
43 		  __print_symbolic(__entry->qw0 & 0xf,
44 				   { QI_CC_TYPE,	"cc_inv" },
45 				   { QI_IOTLB_TYPE,	"iotlb_inv" },
46 				   { QI_DIOTLB_TYPE,	"dev_tlb_inv" },
47 				   { QI_IEC_TYPE,	"iec_inv" },
48 				   { QI_IWD_TYPE,	"inv_wait" },
49 				   { QI_EIOTLB_TYPE,	"p_iotlb_inv" },
50 				   { QI_PC_TYPE,	"pc_inv" },
51 				   { QI_DEIOTLB_TYPE,	"p_dev_tlb_inv" },
52 				   { QI_PGRP_RESP_TYPE,	"page_grp_resp" }),
53 		__get_str(iommu),
54 		__entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3
55 	)
56 );
57 
/*
 * prq_report - trace a page request queue (PRQ) descriptor received
 * from a device.
 *
 * Stores the four raw descriptor words (dw0-dw3), a caller-supplied
 * sequence number, the IOMMU name and the device name.  The descriptor
 * is decoded into human-readable text at print time by
 * decode_prq_descriptor(), writing into the per-event "buff" scratch
 * array (MSG_MAX bytes) reserved in the ring-buffer entry.
 */
58 TRACE_EVENT(prq_report,
59 	TP_PROTO(struct intel_iommu *iommu, struct device *dev,
60 		 u64 dw0, u64 dw1, u64 dw2, u64 dw3,
61 		 unsigned long seq),
62 
63 	TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq),
64 
65 	TP_STRUCT__entry(
66 		__field(u64, dw0)
67 		__field(u64, dw1)
68 		__field(u64, dw2)
69 		__field(u64, dw3)
70 		__field(unsigned long, seq)
71 		__string(iommu, iommu->name)
72 		__string(dev, dev_name(dev))
		/* Scratch space filled lazily by decode_prq_descriptor() in TP_printk. */
73 		__dynamic_array(char, buff, MSG_MAX)
74 	),
75 
76 	TP_fast_assign(
77 		__entry->dw0 = dw0;
78 		__entry->dw1 = dw1;
79 		__entry->dw2 = dw2;
80 		__entry->dw3 = dw3;
81 		__entry->seq = seq;
82 		__assign_str(iommu);
83 		__assign_str(dev);
84 	),
85 
86 	TP_printk("%s/%s seq# %ld: %s",
87 		__get_str(iommu), __get_str(dev), __entry->seq,
88 		decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0,
89 				      __entry->dw1, __entry->dw2, __entry->dw3)
90 	)
91 );
92 
/*
 * cache_tag_log - event class shared by cache tag assign/unassign
 * tracepoints.
 *
 * Captures a snapshot of one struct cache_tag: its type (decoded
 * symbolically on output), domain ID, PASID and reference count, plus
 * the owning IOMMU and device names.
 */
93 DECLARE_EVENT_CLASS(cache_tag_log,
94 	TP_PROTO(struct cache_tag *tag),
95 	TP_ARGS(tag),
96 	TP_STRUCT__entry(
97 		__string(iommu, tag->iommu->name)
98 		__string(dev, dev_name(tag->dev))
99 		__field(u16, type)
100 		__field(u16, domain_id)
101 		__field(u32, pasid)
102 		__field(u32, users)
103 	),
104 	TP_fast_assign(
105 		__assign_str(iommu);
106 		__assign_str(dev);
107 		__entry->type = tag->type;
108 		__entry->domain_id = tag->domain_id;
109 		__entry->pasid = tag->pasid;
110 		__entry->users = tag->users;
111 	),
112 	TP_printk("%s/%s type %s did %d pasid %d ref %d",
113 		  __get_str(iommu), __get_str(dev),
114 		  __print_symbolic(__entry->type,
115 			{ CACHE_TAG_IOTLB,		"iotlb" },
116 			{ CACHE_TAG_DEVTLB,		"devtlb" },
117 			{ CACHE_TAG_NESTING_IOTLB,	"nesting_iotlb" },
118 			{ CACHE_TAG_NESTING_DEVTLB,	"nesting_devtlb" }),
119 		__entry->domain_id, __entry->pasid, __entry->users
120 	)
121 );
122 
/* Fired when a cache tag is assigned (see cache_tag_log for fields). */
123 DEFINE_EVENT(cache_tag_log, cache_tag_assign,
124 	TP_PROTO(struct cache_tag *tag),
125 	TP_ARGS(tag)
126 );
127 
/* Fired when a cache tag is unassigned (see cache_tag_log for fields). */
128 DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
129 	TP_PROTO(struct cache_tag *tag),
130 	TP_ARGS(tag)
131 );
132 
/*
 * cache_tag_flush - event class shared by cache tag range-flush
 * tracepoints.
 *
 * In addition to the cache tag identity (type, domain ID, PASID, IOMMU
 * and device names), records the requested flush window [start, end]
 * together with the addr/pages/mask values actually used for the
 * invalidation.
 */
133 DECLARE_EVENT_CLASS(cache_tag_flush,
134 	TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
135 		 unsigned long addr, unsigned long pages, unsigned long mask),
136 	TP_ARGS(tag, start, end, addr, pages, mask),
137 	TP_STRUCT__entry(
138 		__string(iommu, tag->iommu->name)
139 		__string(dev, dev_name(tag->dev))
140 		__field(u16, type)
141 		__field(u16, domain_id)
142 		__field(u32, pasid)
143 		__field(unsigned long, start)
144 		__field(unsigned long, end)
145 		__field(unsigned long, addr)
146 		__field(unsigned long, pages)
147 		__field(unsigned long, mask)
148 	),
149 	TP_fast_assign(
150 		__assign_str(iommu);
151 		__assign_str(dev);
152 		__entry->type = tag->type;
153 		__entry->domain_id = tag->domain_id;
154 		__entry->pasid = tag->pasid;
155 		__entry->start = start;
156 		__entry->end = end;
157 		__entry->addr = addr;
158 		__entry->pages = pages;
159 		__entry->mask = mask;
160 	),
161 	TP_printk("%s %s[%d] type %s did %d [0x%lx-0x%lx] addr 0x%lx pages 0x%lx mask 0x%lx",
162 		  __get_str(iommu), __get_str(dev), __entry->pasid,
163 		  __print_symbolic(__entry->type,
164 			{ CACHE_TAG_IOTLB,		"iotlb" },
165 			{ CACHE_TAG_DEVTLB,		"devtlb" },
166 			{ CACHE_TAG_NESTING_IOTLB,	"nesting_iotlb" },
167 			{ CACHE_TAG_NESTING_DEVTLB,	"nesting_devtlb" }),
168 		__entry->domain_id, __entry->start, __entry->end,
169 		__entry->addr, __entry->pages, __entry->mask
170 	)
171 );
172 
/* Fired for a cache-tag range flush (see cache_tag_flush for fields). */
173 DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range,
174 	TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
175 		 unsigned long addr, unsigned long pages, unsigned long mask),
176 	TP_ARGS(tag, start, end, addr, pages, mask)
177 );
178 
/*
 * Same payload as cache_tag_flush_range; separate event name lets the
 * two call sites be enabled/filtered independently.
 */
179 DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range_np,
180 	TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
181 		 unsigned long addr, unsigned long pages, unsigned long mask),
182 	TP_ARGS(tag, start, end, addr, pages, mask)
183 );
184 #endif /* _TRACE_INTEL_IOMMU_H */
185 
186 /* This part must be outside protection */
187 #undef TRACE_INCLUDE_PATH
188 #undef TRACE_INCLUDE_FILE
189 #define TRACE_INCLUDE_PATH ../../drivers/iommu/intel/
190 #define TRACE_INCLUDE_FILE trace
191 #include <trace/define_trace.h>
192