xref: /linux/include/trace/events/net.h (revision 7a5f1cd22d47f8ca4b760b6334378ae42c1bd24b)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM net
4 
5 #if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_NET_H
7 
8 #include <linux/skbuff.h>
9 #include <linux/netdevice.h>
10 #include <linux/if_vlan.h>
11 #include <linux/ip.h>
12 #include <linux/tracepoint.h>
13 #include <net/busy_poll.h>
14 
/*
 * net_dev_start_xmit - tracepoint hit at the start of device transmit.
 *
 * Snapshots per-skb metadata before the skb is handed to the driver:
 * queue mapping, skb address, VLAN tag state, L3 protocol, checksum
 * status (ip_summed), total and paged lengths, network/transport header
 * offsets, GSO parameters, and the netns cookie of the transmitting
 * device.
 */
15 TRACE_EVENT(net_dev_start_xmit,
16 
17 	TP_PROTO(const struct sk_buff *skb, const struct net_device *dev),
18 
19 	TP_ARGS(skb, dev),
20 
21 	TP_STRUCT__entry(
22 		__string(	name,			dev->name	)
23 		__field(	u16,			queue_mapping	)
24 		__field(	const void *,		skbaddr		)
25 		__field(	bool,			vlan_tagged	)
26 		__field(	u16,			vlan_proto	)
27 		__field(	u16,			vlan_tci	)
28 		__field(	u16,			protocol	)
29 		__field(	u8,			ip_summed	)
30 		__field(	unsigned int,		len		)
31 		__field(	unsigned int,		data_len	)
32 		__field(	int,			network_offset	)
33 		__field(	bool,			transport_offset_valid)
34 		__field(	int,			transport_offset)
35 		__field(	u8,			tx_flags	)
36 		__field(	u16,			gso_size	)
37 		__field(	u16,			gso_segs	)
38 		__field(	u16,			gso_type	)
39 		__field(	u64,			net_cookie	)
40 	),
41 
42 	TP_fast_assign(
43 		__assign_str(name);
44 		__entry->queue_mapping = skb->queue_mapping;
45 		__entry->skbaddr = skb;
		/* vlan_proto/vlan_tci are recorded unconditionally; they are
		 * only meaningful when vlan_tagged is true.
		 */
46 		__entry->vlan_tagged = skb_vlan_tag_present(skb);
47 		__entry->vlan_proto = ntohs(skb->vlan_proto);
48 		__entry->vlan_tci = skb_vlan_tag_get(skb);
49 		__entry->protocol = ntohs(skb->protocol);
50 		__entry->ip_summed = skb->ip_summed;
51 		__entry->len = skb->len;
52 		__entry->data_len = skb->data_len;
53 		__entry->network_offset = skb_network_offset(skb);
		/* transport_offset is reported as 0 when the transport header
		 * was never set; consumers must check transport_offset_valid
		 * before trusting it (0 is also a legal real offset).
		 */
54 		__entry->transport_offset_valid =
55 			skb_transport_header_was_set(skb);
56 		__entry->transport_offset = skb_transport_header_was_set(skb) ?
57 			skb_transport_offset(skb) : 0;
58 		__entry->tx_flags = skb_shinfo(skb)->tx_flags;
59 		__entry->gso_size = skb_shinfo(skb)->gso_size;
60 		__entry->gso_segs = skb_shinfo(skb)->gso_segs;
61 		__entry->gso_type = skb_shinfo(skb)->gso_type;
62 		__entry->net_cookie = dev_net(dev)->net_cookie;
63 	),
64 
65 	TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x net_cookie=%llu",
66 		  __get_str(name), __entry->queue_mapping, __entry->skbaddr,
67 		  __entry->vlan_tagged, __entry->vlan_proto, __entry->vlan_tci,
68 		  __entry->protocol, __entry->ip_summed, __entry->len,
69 		  __entry->data_len,
70 		  __entry->network_offset, __entry->transport_offset_valid,
71 		  __entry->transport_offset, __entry->tx_flags,
72 		  __entry->gso_size, __entry->gso_segs,
73 		  __entry->gso_type, __entry->net_cookie)
74 );
75 
/*
 * net_dev_xmit - records the outcome of a device transmit attempt.
 *
 * @rc is the return code of the transmit; @skb_len is supplied by the
 * caller instead of being read from the skb in TP_fast_assign.
 * NOTE(review): presumably the skb may already have been consumed/freed
 * by the driver at this point, which is why only its address and the
 * saved length are recorded — confirm against the callers in
 * net/core/dev.c.
 */
76 TRACE_EVENT(net_dev_xmit,
77 
78 	TP_PROTO(struct sk_buff *skb,
79 		 int rc,
80 		 struct net_device *dev,
81 		 unsigned int skb_len),
82 
83 	TP_ARGS(skb, rc, dev, skb_len),
84 
85 	TP_STRUCT__entry(
86 		__field(	void *,		skbaddr		)
87 		__field(	unsigned int,	len		)
88 		__field(	int,		rc		)
89 		__string(	name,		dev->name	)
90 		__field(	u64,		net_cookie	)
91 	),
92 
93 	TP_fast_assign(
		/* Only the pointer value is stored, never dereferenced. */
94 		__entry->skbaddr = skb;
95 		__entry->len = skb_len;
96 		__entry->rc = rc;
97 		__entry->net_cookie = dev_net(dev)->net_cookie;
98 		__assign_str(name);
99 	),
100 
101 	TP_printk("dev=%s skbaddr=%p len=%u rc=%d net_cookie=%llu",
102 		__get_str(name), __entry->skbaddr,
103 		__entry->len, __entry->rc,
104 		__entry->net_cookie)
105 );
106 
/*
 * net_dev_xmit_timeout - records a transmit timeout on a device queue.
 *
 * Captures the device name, the driver name (resolved at trace time via
 * netdev_drivername()), the affected TX queue index passed by the
 * caller, and the device's netns cookie.
 */
107 TRACE_EVENT(net_dev_xmit_timeout,
108 
109 	TP_PROTO(struct net_device *dev,
110 		 int queue_index),
111 
112 	TP_ARGS(dev, queue_index),
113 
114 	TP_STRUCT__entry(
115 		__string(	name,		dev->name	)
116 		__string(	driver,		netdev_drivername(dev))
117 		__field(	int,		queue_index	)
118 		__field(	u64,		net_cookie	)
119 	),
120 
121 	TP_fast_assign(
122 		__assign_str(name);
123 		__assign_str(driver);
124 		__entry->queue_index = queue_index;
125 		__entry->net_cookie = dev_net(dev)->net_cookie;
126 	),
127 
128 	TP_printk("dev=%s driver=%s queue=%d net_cookie=%llu",
129 		__get_str(name), __get_str(driver),
130 		__entry->queue_index, __entry->net_cookie)
131 );
132 
/*
 * net_dev_template - common event class for simple per-skb events.
 *
 * Records the skb address, its total length, the name of skb->dev, and
 * the netns cookie of skb->dev. Instantiated by net_dev_queue,
 * netif_receive_skb and netif_rx below.
 *
 * NOTE(review): TP_fast_assign dereferences skb->dev unconditionally,
 * so every instantiating tracepoint must fire with skb->dev set.
 */
133 DECLARE_EVENT_CLASS(net_dev_template,
134 
135 	TP_PROTO(struct sk_buff *skb),
136 
137 	TP_ARGS(skb),
138 
139 	TP_STRUCT__entry(
140 		__field(	void *,		skbaddr		)
141 		__field(	unsigned int,	len		)
142 		__string(	name,		skb->dev->name	)
143 		__field(	u64,		net_cookie	)
144 	),
145 
146 	TP_fast_assign(
147 		__entry->skbaddr = skb;
148 		__entry->len = skb->len;
149 		__entry->net_cookie = dev_net(skb->dev)->net_cookie;
150 		__assign_str(name);
151 	),
152 
153 	TP_printk("dev=%s skbaddr=%p len=%u net_cookie=%llu",
154 		__get_str(name), __entry->skbaddr,
155 		__entry->len,
156 		__entry->net_cookie)
157 )
158 
/* net_dev_queue: skb queued for transmission (net_dev_template fields). */
159 DEFINE_EVENT(net_dev_template, net_dev_queue,
160 
161 	TP_PROTO(struct sk_buff *skb),
162 
163 	TP_ARGS(skb)
164 );
165 
/* netif_receive_skb: skb entering RX processing (net_dev_template fields). */
166 DEFINE_EVENT(net_dev_template, netif_receive_skb,
167 
168 	TP_PROTO(struct sk_buff *skb),
169 
170 	TP_ARGS(skb)
171 );
172 
/* netif_rx: skb received via the netif_rx path (net_dev_template fields). */
173 DEFINE_EVENT(net_dev_template, netif_rx,
174 
175 	TP_PROTO(struct sk_buff *skb),
176 
177 	TP_ARGS(skb)
178 );
179 
/*
 * net_dev_rx_verbose_template - detailed event class for RX-entry
 * tracepoints.
 *
 * Snapshots a wide set of skb metadata on the receive path: NAPI id,
 * queue mapping, skb address, VLAN tag state, L3 protocol, checksum
 * status, flow hash and its L4 validity, lengths, truesize, MAC header
 * offset/validity, fragment count and GSO parameters, plus the netns
 * cookie of skb->dev.
 */
180 DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
181 
182 	TP_PROTO(const struct sk_buff *skb),
183 
184 	TP_ARGS(skb),
185 
186 	TP_STRUCT__entry(
187 		__string(	name,			skb->dev->name	)
188 		__field(	unsigned int,		napi_id		)
189 		__field(	u16,			queue_mapping	)
190 		__field(	const void *,		skbaddr		)
191 		__field(	bool,			vlan_tagged	)
192 		__field(	u16,			vlan_proto	)
193 		__field(	u16,			vlan_tci	)
194 		__field(	u16,			protocol	)
195 		__field(	u8,			ip_summed	)
196 		__field(	u32,			hash		)
197 		__field(	bool,			l4_hash		)
198 		__field(	unsigned int,		len		)
199 		__field(	unsigned int,		data_len	)
200 		__field(	unsigned int,		truesize	)
201 		__field(	bool,			mac_header_valid)
202 		__field(	int,			mac_header	)
203 		__field(	unsigned char,		nr_frags	)
204 		__field(	u16,			gso_size	)
205 		__field(	u16,			gso_type	)
206 		__field(	u64,			net_cookie	)
207 	),
208 
209 	TP_fast_assign(
210 		__assign_str(name);
		/* napi_id is only tracked when busy polling is built in;
		 * otherwise (and for invalid ids) it is reported as 0.
		 */
211 #ifdef CONFIG_NET_RX_BUSY_POLL
212 		__entry->napi_id = napi_id_valid(skb->napi_id) ?
213 				   skb->napi_id : 0;
214 #else
215 		__entry->napi_id = 0;
216 #endif
217 		__entry->queue_mapping = skb->queue_mapping;
218 		__entry->skbaddr = skb;
219 		__entry->vlan_tagged = skb_vlan_tag_present(skb);
220 		__entry->vlan_proto = ntohs(skb->vlan_proto);
221 		__entry->vlan_tci = skb_vlan_tag_get(skb);
222 		__entry->protocol = ntohs(skb->protocol);
223 		__entry->ip_summed = skb->ip_summed;
224 		__entry->hash = skb->hash;
225 		__entry->l4_hash = skb->l4_hash;
226 		__entry->len = skb->len;
227 		__entry->data_len = skb->data_len;
228 		__entry->truesize = skb->truesize;
		/* mac_header offset is computed unconditionally; consumers
		 * must check mac_header_valid before interpreting it.
		 */
229 		__entry->mac_header_valid = skb_mac_header_was_set(skb);
230 		__entry->mac_header = skb_mac_header(skb) - skb->data;
231 		__entry->nr_frags = skb_shinfo(skb)->nr_frags;
232 		__entry->gso_size = skb_shinfo(skb)->gso_size;
233 		__entry->gso_type = skb_shinfo(skb)->gso_type;
234 		__entry->net_cookie = dev_net(skb->dev)->net_cookie;
235 	),
236 
237 	TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x net_cookie=%llu",
238 		  __get_str(name), __entry->napi_id, __entry->queue_mapping,
239 		  __entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
240 		  __entry->vlan_tci, __entry->protocol, __entry->ip_summed,
241 		  __entry->hash, __entry->l4_hash, __entry->len,
242 		  __entry->data_len, __entry->truesize,
243 		  __entry->mac_header_valid, __entry->mac_header,
244 		  __entry->nr_frags, __entry->gso_size,
245 		  __entry->gso_type, __entry->net_cookie)
246 );
247 
/*
 * Entry-side RX tracepoints. Each instantiates the verbose RX template
 * with the full per-skb field set; they differ only in which function
 * they are hooked into (see their _exit counterparts below).
 */
/* napi_gro_frags_entry: entry into napi_gro_frags(). */
248 DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry,
249 
250 	TP_PROTO(const struct sk_buff *skb),
251 
252 	TP_ARGS(skb)
253 );
254 
/* napi_gro_receive_entry: entry into napi_gro_receive(). */
255 DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_receive_entry,
256 
257 	TP_PROTO(const struct sk_buff *skb),
258 
259 	TP_ARGS(skb)
260 );
261 
/* netif_receive_skb_entry: entry into netif_receive_skb(). */
262 DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,
263 
264 	TP_PROTO(const struct sk_buff *skb),
265 
266 	TP_ARGS(skb)
267 );
268 
/* netif_receive_skb_list_entry: per-skb entry on the list RX path. */
269 DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_list_entry,
270 
271 	TP_PROTO(const struct sk_buff *skb),
272 
273 	TP_ARGS(skb)
274 );
275 
/* netif_rx_entry: entry into netif_rx(). */
276 DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
277 
278 	TP_PROTO(const struct sk_buff *skb),
279 
280 	TP_ARGS(skb)
281 );
282 
/*
 * net_dev_rx_exit_template - minimal event class for RX-exit
 * tracepoints: records only the integer return value of the traced
 * receive function. Pairs with the *_entry events above to bracket a
 * call.
 */
283 DECLARE_EVENT_CLASS(net_dev_rx_exit_template,
284 
285 	TP_PROTO(int ret),
286 
287 	TP_ARGS(ret),
288 
289 	TP_STRUCT__entry(
290 		__field(int,	ret)
291 	),
292 
293 	TP_fast_assign(
294 		__entry->ret = ret;
295 	),
296 
297 	TP_printk("ret=%d", __entry->ret)
298 );
299 
/*
 * Exit-side RX tracepoints: one per *_entry event above, each carrying
 * only the return value of the corresponding receive function.
 */
/* napi_gro_frags_exit: return value of napi_gro_frags(). */
300 DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_frags_exit,
301 
302 	TP_PROTO(int ret),
303 
304 	TP_ARGS(ret)
305 );
306 
/* napi_gro_receive_exit: return value of napi_gro_receive(). */
307 DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_receive_exit,
308 
309 	TP_PROTO(int ret),
310 
311 	TP_ARGS(ret)
312 );
313 
/* netif_receive_skb_exit: return value of netif_receive_skb(). */
314 DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_exit,
315 
316 	TP_PROTO(int ret),
317 
318 	TP_ARGS(ret)
319 );
320 
/* netif_rx_exit: return value of netif_rx(). */
321 DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit,
322 
323 	TP_PROTO(int ret),
324 
325 	TP_ARGS(ret)
326 );
327 
/* netif_receive_skb_list_exit: return value of the list RX path. */
328 DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit,
329 
330 	TP_PROTO(int ret),
331 
332 	TP_ARGS(ret)
333 );
334 
335 #endif /* _TRACE_NET_H */
336 
337 /* This part must be outside protection */
338 #include <trace/define_trace.h>
339