/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM net

#if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NET_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tracepoint.h>

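/*
 * Fired when the core transmit path hands an skb to a device driver,
 * typically just before the driver's transmit routine is invoked.
 * Records the queue mapping, VLAN tag, checksum state, header offsets,
 * GSO metadata and the netns cookie of the outgoing skb.
 */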
TRACE_EVENT(net_dev_start_xmit,

	TP_PROTO(const struct sk_buff *skb, const struct net_device *dev),

	TP_ARGS(skb, dev),

	TP_STRUCT__entry(
		__string(	name,			dev->name	)
		__field(	u16,			queue_mapping	)
		__field(	const void *,		skbaddr		)
		__field(	bool,			vlan_tagged	)
		__field(	u16,			vlan_proto	)
		__field(	u16,			vlan_tci	)
		__field(	u16,			protocol	)
		__field(	u8,			ip_summed	)
		__field(	unsigned int,		len		)
		__field(	unsigned int,		data_len	)
		__field(	int,			network_offset	)
		__field(	bool,			transport_offset_valid)
		__field(	int,			transport_offset)
		__field(	u8,			tx_flags	)
		__field(	u16,			gso_size	)
		__field(	u16,			gso_segs	)
		__field(	u16,			gso_type	)
		__field(	u64,			net_cookie	)
	),

	TP_fast_assign(
		__assign_str(name);
		__entry->queue_mapping = skb->queue_mapping;
		__entry->skbaddr = skb;
		__entry->vlan_tagged = skb_vlan_tag_present(skb);
		__entry->vlan_proto = ntohs(skb->vlan_proto);
		__entry->vlan_tci = skb_vlan_tag_get(skb);
		__entry->protocol = ntohs(skb->protocol);
		__entry->ip_summed = skb->ip_summed;
		__entry->len = skb->len;
		__entry->data_len = skb->data_len;
		__entry->network_offset = skb_network_offset(skb);
		__entry->transport_offset_valid =
			skb_transport_header_was_set(skb);
		__entry->transport_offset = skb_transport_header_was_set(skb) ?
			skb_transport_offset(skb) : 0;
		__entry->tx_flags = skb_shinfo(skb)->tx_flags;
		__entry->gso_size = skb_shinfo(skb)->gso_size;
		__entry->gso_segs = skb_shinfo(skb)->gso_segs;
		__entry->gso_type = skb_shinfo(skb)->gso_type;
		__entry->net_cookie = dev_net(dev)->net_cookie;
	),

	TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x net_cookie=%llu",
		  __get_str(name), __entry->queue_mapping, __entry->skbaddr,
		  __entry->vlan_tagged, __entry->vlan_proto, __entry->vlan_tci,
		  __entry->protocol, __entry->ip_summed, __entry->len,
		  __entry->data_len,
		  __entry->network_offset, __entry->transport_offset_valid,
		  __entry->transport_offset, __entry->tx_flags,
		  __entry->gso_size, __entry->gso_segs,
		  __entry->gso_type, __entry->net_cookie)
);

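/*
 * Fired after the driver's transmit routine has returned, recording
 * the return code.  The length is passed in separately because the
 * skb may already have been consumed on success, so only its address
 * is stored.  A hedged sketch of the expected caller (based on
 * xmit_one() in net/core/dev.c; exact call sites may vary by kernel
 * version):
 *
 *	len = skb->len;
 *	trace_net_dev_start_xmit(skb, dev);
 *	rc = netdev_start_xmit(skb, dev, txq, more);
 *	trace_net_dev_xmit(skb, rc, dev, len);
 */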
TRACE_EVENT(net_dev_xmit,

	TP_PROTO(struct sk_buff *skb,
		 int rc,
		 struct net_device *dev,
		 unsigned int skb_len),

	TP_ARGS(skb, rc, dev, skb_len),

	TP_STRUCT__entry(
		__field(	void *,		skbaddr		)
		__field(	unsigned int,	len		)
		__field(	int,		rc		)
		__string(	name,		dev->name	)
		__field(	u64,		net_cookie	)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->len = skb_len;
		__entry->rc = rc;
		__entry->net_cookie = dev_net(dev)->net_cookie;
		__assign_str(name);
	),

	TP_printk("dev=%s skbaddr=%p len=%u rc=%d net_cookie=%llu",
		__get_str(name), __entry->skbaddr,
		__entry->len, __entry->rc,
		__entry->net_cookie)
);

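/*
 * Fired when a transmit queue is declared hung, typically by the
 * netdev watchdog, identifying the device, its driver and the stalled
 * queue index.
 */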
TRACE_EVENT(net_dev_xmit_timeout,

	TP_PROTO(struct net_device *dev,
		 int queue_index),

	TP_ARGS(dev, queue_index),

	TP_STRUCT__entry(
		__string(	name,		dev->name	)
		__string(	driver,		netdev_drivername(dev))
		__field(	int,		queue_index	)
		__field(	u64,		net_cookie	)
	),

	TP_fast_assign(
		__assign_str(name);
		__assign_str(driver);
		__entry->queue_index = queue_index;
		__entry->net_cookie = dev_net(dev)->net_cookie;
	),

	TP_printk("dev=%s driver=%s queue=%d net_cookie=%llu",
		__get_str(name), __get_str(driver),
		__entry->queue_index, __entry->net_cookie)
);

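/*
 * Minimal per-skb template shared by the queueing and basic receive
 * events below; it records only the skb address, its length, the
 * device name and the netns cookie.
 */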
DECLARE_EVENT_CLASS(net_dev_template,

	TP_PROTO(struct sk_buff *skb),

	TP_ARGS(skb),

	TP_STRUCT__entry(
		__field(	void *,		skbaddr		)
		__field(	unsigned int,	len		)
		__string(	name,		skb->dev->name	)
		__field(	u64,		net_cookie	)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->len = skb->len;
		__entry->net_cookie = dev_net(skb->dev)->net_cookie;
		__assign_str(name);
	),

	TP_printk("dev=%s skbaddr=%p len=%u net_cookie=%llu",
		__get_str(name), __entry->skbaddr,
		__entry->len,
		__entry->net_cookie)
)

DEFINE_EVENT(net_dev_template, net_dev_queue,

	TP_PROTO(struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_template, netif_receive_skb,

	TP_PROTO(struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_template, netif_rx,

	TP_PROTO(struct sk_buff *skb),

	TP_ARGS(skb)
);

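/*
 * Verbose receive-side template used by the *_entry events below.  In
 * addition to the basic skb fields it captures the NAPI id, VLAN and
 * checksum state, receive hash, header layout and GSO metadata of the
 * incoming skb.
 */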
DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb),

	TP_STRUCT__entry(
		__string(	name,			skb->dev->name	)
		__field(	unsigned int,		napi_id		)
		__field(	u16,			queue_mapping	)
		__field(	const void *,		skbaddr		)
		__field(	bool,			vlan_tagged	)
		__field(	u16,			vlan_proto	)
		__field(	u16,			vlan_tci	)
		__field(	u16,			protocol	)
		__field(	u8,			ip_summed	)
		__field(	u32,			hash		)
		__field(	bool,			l4_hash		)
		__field(	unsigned int,		len		)
		__field(	unsigned int,		data_len	)
		__field(	unsigned int,		truesize	)
		__field(	bool,			mac_header_valid)
		__field(	int,			mac_header	)
		__field(	unsigned char,		nr_frags	)
		__field(	u16,			gso_size	)
		__field(	u16,			gso_type	)
		__field(	u64,			net_cookie	)
	),

	TP_fast_assign(
		__assign_str(name);
#ifdef CONFIG_NET_RX_BUSY_POLL
		__entry->napi_id = skb->napi_id;
#else
		__entry->napi_id = 0;
#endif
		__entry->queue_mapping = skb->queue_mapping;
		__entry->skbaddr = skb;
		__entry->vlan_tagged = skb_vlan_tag_present(skb);
		__entry->vlan_proto = ntohs(skb->vlan_proto);
		__entry->vlan_tci = skb_vlan_tag_get(skb);
		__entry->protocol = ntohs(skb->protocol);
		__entry->ip_summed = skb->ip_summed;
		__entry->hash = skb->hash;
		__entry->l4_hash = skb->l4_hash;
		__entry->len = skb->len;
		__entry->data_len = skb->data_len;
		__entry->truesize = skb->truesize;
		__entry->mac_header_valid = skb_mac_header_was_set(skb);
		__entry->mac_header = skb_mac_header(skb) - skb->data;
		__entry->nr_frags = skb_shinfo(skb)->nr_frags;
		__entry->gso_size = skb_shinfo(skb)->gso_size;
		__entry->gso_type = skb_shinfo(skb)->gso_type;
		__entry->net_cookie = dev_net(skb->dev)->net_cookie;
	),

	TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x net_cookie=%llu",
		  __get_str(name), __entry->napi_id, __entry->queue_mapping,
		  __entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
		  __entry->vlan_tci, __entry->protocol, __entry->ip_summed,
		  __entry->hash, __entry->l4_hash, __entry->len,
		  __entry->data_len, __entry->truesize,
		  __entry->mac_header_valid, __entry->mac_header,
		  __entry->nr_frags, __entry->gso_size,
		  __entry->gso_type, __entry->net_cookie)
);

DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_receive_entry,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_list_entry,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb)
);

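/*
 * Each receive *_entry event above has a matching *_exit event,
 * defined from this template, which records only the return value of
 * the corresponding receive function.
 */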
DECLARE_EVENT_CLASS(net_dev_rx_exit_template,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(int,	ret)
	),

	TP_fast_assign(
		__entry->ret = ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_frags_exit,

	TP_PROTO(int ret),

	TP_ARGS(ret)
);

DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_receive_exit,

	TP_PROTO(int ret),

	TP_ARGS(ret)
);

DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_exit,

	TP_PROTO(int ret),

	TP_ARGS(ret)
);

DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit,

	TP_PROTO(int ret),

	TP_ARGS(ret)
);

DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit,

	TP_PROTO(int ret),

	TP_ARGS(ret)
);

#endif /* _TRACE_NET_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
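
/*
 * Illustrative consumption sketch (not part of this header): once the
 * events are compiled in, they can typically be enabled and read via
 * tracefs, for example:
 *
 *	echo 1 > /sys/kernel/tracing/events/net/net_dev_xmit/enable
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * The mount point may instead be /sys/kernel/debug/tracing on older
 * setups.
 */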