xref: /linux/include/trace/events/mptcp.h (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM mptcp
4 
5 #if !defined(_TRACE_MPTCP_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_MPTCP_H
7 
8 #include <linux/ipv6.h>
9 #include <linux/tcp.h>
10 #include <linux/tracepoint.h>
11 #include <net/ipv6.h>
12 #include <net/tcp.h>
13 #include <linux/sock_diag.h>
14 #include <net/rstreason.h>
15 
/*
 * Render a numeric subflow mapping-status code as a symbolic string in the
 * trace output.  NOTE(review): the values presumably mirror the
 * enum mapping_status used by the MPTCP subflow code — keep them in sync
 * with that enum; verify against net/mptcp/subflow.c.
 */
16 #define show_mapping_status(status)					\
17 	__print_symbolic(status,					\
18 		{ 0, "MAPPING_OK" },					\
19 		{ 1, "MAPPING_INVALID" },				\
20 		{ 2, "MAPPING_EMPTY" },					\
21 		{ 3, "MAPPING_DATA_FIN" },				\
22 		{ 4, "MAPPING_DUMMY" })
23 
/*
 * mptcp_subflow_get_send - snapshot of one subflow as examined by the
 * MPTCP packet scheduler when choosing a subflow to send on.
 *
 * Captured per event:
 *   active  - mptcp_subflow_active() result
 *   free    - sk_stream_memory_free() on the subflow's TCP socket, or 0
 *             when the socket is missing / not a full socket
 *   snd_wnd - TCP send window of the subflow socket (0 if unavailable)
 *   pace    - the socket's sk_pacing_rate (0 if unavailable)
 *   backup  - set if the subflow is, or has requested to become, a backup
 *   ratio   - (sk_wmem_queued << 32) / pace, or 0 when pace is unknown;
 *             a relative "time to drain" metric for comparing subflows
 */
24 TRACE_EVENT(mptcp_subflow_get_send,
25 
26 	TP_PROTO(struct mptcp_subflow_context *subflow),
27 
28 	TP_ARGS(subflow),
29 
30 	TP_STRUCT__entry(
31 		__field(bool, active)
32 		__field(bool, free)
33 		__field(u32, snd_wnd)
34 		__field(u32, pace)
35 		__field(u8, backup)
36 		__field(u64, ratio)
37 	),
38 
39 	TP_fast_assign(
40 		struct sock *ssk;
41 
42 		__entry->active = mptcp_subflow_active(subflow);
43 		__entry->backup = subflow->backup || subflow->request_bkup;
44 
		/* tcp_sock may be absent or a non-full (e.g. request) socket */
45 		if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
46 			__entry->free = sk_stream_memory_free(subflow->tcp_sock);
47 		else
48 			__entry->free = 0;
49 
50 		ssk = mptcp_subflow_tcp_sock(subflow);
51 		if (ssk && sk_fullsock(ssk)) {
52 			__entry->snd_wnd = tcp_sk(ssk)->snd_wnd;
53 			__entry->pace = READ_ONCE(ssk->sk_pacing_rate);
54 		} else {
55 			__entry->snd_wnd = 0;
56 			__entry->pace = 0;
57 		}
58 
		/* queued bytes scaled by 2^32 over the pacing rate; avoid /0 */
59 		if (ssk && sk_fullsock(ssk) && __entry->pace)
60 			__entry->ratio = div_u64((u64)ssk->sk_wmem_queued << 32, __entry->pace);
61 		else
62 			__entry->ratio = 0;
63 	),
64 
65 	TP_printk("active=%d free=%d snd_wnd=%u pace=%u backup=%u ratio=%llu",
66 		  __entry->active, __entry->free,
67 		  __entry->snd_wnd, __entry->pace,
68 		  __entry->backup, __entry->ratio)
69 );
70 
/*
 * mptcp_dump_mpext - event class dumping every field of a struct
 * mptcp_ext (the per-skb MPTCP DSS metadata): data sequence/ack numbers,
 * mapping bounds, and the assorted option flag bits.  Shared by the
 * mptcp_sendmsg_frag and get_mapping_status events below.
 */
71 DECLARE_EVENT_CLASS(mptcp_dump_mpext,
72 
73 	TP_PROTO(struct mptcp_ext *mpext),
74 
75 	TP_ARGS(mpext),
76 
77 	TP_STRUCT__entry(
78 		__field(u64, data_ack)
79 		__field(u64, data_seq)
80 		__field(u32, subflow_seq)
81 		__field(u16, data_len)
82 		__field(u16, csum)
83 		__field(u8, use_map)
84 		__field(u8, dsn64)
85 		__field(u8, data_fin)
86 		__field(u8, use_ack)
87 		__field(u8, ack64)
88 		__field(u8, mpc_map)
89 		__field(u8, frozen)
90 		__field(u8, reset_transient)
91 		__field(u8, reset_reason)
92 		__field(u8, csum_reqd)
93 		__field(u8, infinite_map)
94 	),
95 
96 	TP_fast_assign(
		/* a 32-bit ack is widened so one u64 field serves both cases */
97 		__entry->data_ack = mpext->ack64 ? mpext->data_ack : mpext->data_ack32;
98 		__entry->data_seq = mpext->data_seq;
99 		__entry->subflow_seq = mpext->subflow_seq;
100 		__entry->data_len = mpext->data_len;
101 		__entry->csum = (__force u16)mpext->csum;
102 		__entry->use_map = mpext->use_map;
103 		__entry->dsn64 = mpext->dsn64;
104 		__entry->data_fin = mpext->data_fin;
105 		__entry->use_ack = mpext->use_ack;
106 		__entry->ack64 = mpext->ack64;
107 		__entry->mpc_map = mpext->mpc_map;
108 		__entry->frozen = mpext->frozen;
109 		__entry->reset_transient = mpext->reset_transient;
110 		__entry->reset_reason = mpext->reset_reason;
111 		__entry->csum_reqd = mpext->csum_reqd;
112 		__entry->infinite_map = mpext->infinite_map;
113 	),
114 
115 	TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u csum=%x use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u csum_reqd=%u infinite_map=%u",
116 		  __entry->data_ack, __entry->data_seq,
117 		  __entry->subflow_seq, __entry->data_len,
118 		  __entry->csum, __entry->use_map,
119 		  __entry->dsn64, __entry->data_fin,
120 		  __entry->use_ack, __entry->ack64,
121 		  __entry->mpc_map, __entry->frozen,
122 		  __entry->reset_transient, __entry->reset_reason,
123 		  __entry->csum_reqd, __entry->infinite_map)
124 );
125 
/*
 * Instance of mptcp_dump_mpext fired via trace_mptcp_sendmsg_frag();
 * by its name, used on the transmit path — confirm against callers.
 */
126 DEFINE_EVENT(mptcp_dump_mpext, mptcp_sendmsg_frag,
127 	TP_PROTO(struct mptcp_ext *mpext),
128 	TP_ARGS(mpext));
129 
/*
 * Instance of mptcp_dump_mpext fired via trace_get_mapping_status();
 * by its name, used while validating received DSS mappings — confirm
 * against callers.
 */
130 DEFINE_EVENT(mptcp_dump_mpext, get_mapping_status,
131 	TP_PROTO(struct mptcp_ext *mpext),
132 	TP_ARGS(mpext));
133 
/*
 * ack_update_msk - records how an incoming DATA_ACK moves the MPTCP
 * socket's send state: the ack value itself, snd_una before and after the
 * update, the newly computed send-window end and the msk's current one.
 * All five values are copied verbatim from the caller.
 */
134 TRACE_EVENT(ack_update_msk,
135 
136 	TP_PROTO(u64 data_ack, u64 old_snd_una,
137 		 u64 new_snd_una, u64 new_wnd_end,
138 		 u64 msk_wnd_end),
139 
140 	TP_ARGS(data_ack, old_snd_una,
141 		new_snd_una, new_wnd_end,
142 		msk_wnd_end),
143 
144 	TP_STRUCT__entry(
145 		__field(u64, data_ack)
146 		__field(u64, old_snd_una)
147 		__field(u64, new_snd_una)
148 		__field(u64, new_wnd_end)
149 		__field(u64, msk_wnd_end)
150 	),
151 
152 	TP_fast_assign(
153 		__entry->data_ack = data_ack;
154 		__entry->old_snd_una = old_snd_una;
155 		__entry->new_snd_una = new_snd_una;
156 		__entry->new_wnd_end = new_wnd_end;
157 		__entry->msk_wnd_end = msk_wnd_end;
158 	),
159 
160 	TP_printk("data_ack=%llu old_snd_una=%llu new_snd_una=%llu new_wnd_end=%llu msk_wnd_end=%llu",
161 		  __entry->data_ack, __entry->old_snd_una,
162 		  __entry->new_snd_una, __entry->new_wnd_end,
163 		  __entry->msk_wnd_end)
164 );
165 
/*
 * subflow_check_data_avail - logs the DSS mapping status of the skb at
 * the head of the subflow receive queue, printed symbolically via
 * show_mapping_status(), together with the skb pointer (may be NULL).
 */
166 TRACE_EVENT(subflow_check_data_avail,
167 
168 	TP_PROTO(__u8 status, struct sk_buff *skb),
169 
170 	TP_ARGS(status, skb),
171 
172 	TP_STRUCT__entry(
173 		__field(u8, status)
174 		__field(const void *, skb)
175 	),
176 
177 	TP_fast_assign(
178 		__entry->status = status;
179 		__entry->skb = skb;
180 	),
181 
182 	TP_printk("mapping_status=%s, skb=%p",
183 		  show_mapping_status(__entry->status),
184 		  __entry->skb)
185 );
186 
187 #include <trace/events/net_probe_common.h>
188 
/*
 * mptcp_rcvbuf_grow - snapshot taken when MPTCP receive-buffer
 * autotuning runs: measured round trip @time, the rcvq_space bookkeeping
 * (rtt_us, copied, space), in-queue and out-of-order backlog sizes, the
 * resulting sk_rcvbuf / advertised-window state, and the 4-tuple plus
 * socket address identifying the connection.
 */
189 TRACE_EVENT(mptcp_rcvbuf_grow,
190 
191 	TP_PROTO(struct sock *sk, int time),
192 
193 	TP_ARGS(sk, time),
194 
195 	TP_STRUCT__entry(
196 		__field(int, time)
197 		__field(__u32, rtt_us)
198 		__field(__u32, copied)
199 		__field(__u32, inq)
200 		__field(__u32, space)
201 		__field(__u32, ooo_space)
202 		__field(__u32, rcvbuf)
203 		__field(__u32, rcv_wnd)
204 		__field(__u8, scaling_ratio)
205 		__field(__u16, sport)
206 		__field(__u16, dport)
207 		__field(__u16, family)
208 		__array(__u8, saddr, 4)
209 		__array(__u8, daddr, 4)
210 		__array(__u8, saddr_v6, 16)
211 		__array(__u8, daddr_v6, 16)
212 		__field(const void *, skaddr)
213 	),
214 
215 	TP_fast_assign(
216 		struct mptcp_sock *msk = mptcp_sk(sk);
217 		struct inet_sock *inet = inet_sk(sk);
218 		bool ofo_empty;
219 		__be32 *p32;
220 
221 		__entry->time = time;
		/* rcvq_space.rtt_us is presumably kept <<3, srtt-style — scale back */
222 		__entry->rtt_us = msk->rcvq_space.rtt_us >> 3;
223 		__entry->copied = msk->rcvq_space.copied;
224 		__entry->inq = mptcp_inq_hint(sk);
225 		__entry->space = msk->rcvq_space.space;
		/* ooo backlog: sequence span from ack_seq to the last ooo skb */
226 		ofo_empty = RB_EMPTY_ROOT(&msk->out_of_order_queue);
227 		__entry->ooo_space = ofo_empty ? 0 :
228 				     MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq -
229 				     msk->ack_seq;
230 
231 		__entry->rcvbuf = sk->sk_rcvbuf;
		/* remaining advertised receive window */
232 		__entry->rcv_wnd = atomic64_read(&msk->rcv_wnd_sent) -
233 				   msk->ack_seq;
234 		__entry->scaling_ratio = msk->scaling_ratio;
235 		__entry->sport = ntohs(inet->inet_sport);
236 		__entry->dport = ntohs(inet->inet_dport);
237 		__entry->family = sk->sk_family;
238 
		/* IPv4 addresses stored as raw byte arrays for %pI4 */
239 		p32 = (__be32 *)__entry->saddr;
240 		*p32 = inet->inet_saddr;
241 
242 		p32 = (__be32 *)__entry->daddr;
243 		*p32 = inet->inet_daddr;
244 
		/* helper from net_probe_common.h fills the v6 (or mapped) fields */
245 		TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
246 			       sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
247 
248 		__entry->skaddr = sk;
249 	),
250 
251 	TP_printk("time=%u rtt_us=%u copied=%u inq=%u space=%u ooo=%u scaling_ratio=%u "
252 		  "rcvbuf=%u rcv_wnd=%u family=%d sport=%hu dport=%hu saddr=%pI4 "
253 		  "daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c skaddr=%p",
254 		  __entry->time, __entry->rtt_us, __entry->copied,
255 		  __entry->inq, __entry->space, __entry->ooo_space,
256 		  __entry->scaling_ratio, __entry->rcvbuf, __entry->rcv_wnd,
257 		  __entry->family, __entry->sport, __entry->dport,
258 		  __entry->saddr, __entry->daddr, __entry->saddr_v6,
259 		  __entry->daddr_v6, __entry->skaddr)
260 );
261 #endif /* _TRACE_MPTCP_H */
262 
263 /* This part must be outside protection */
264 #include <trace/define_trace.h>
265