// SPDX-License-Identifier: GPL-2.0-only

/* WARNING: This implementation is not necessarily the same
 * as the tcp_cubic.c.  The purpose is mainly for testing
 * the kernel BPF logic.
 *
 * Highlights:
 * 1. CONFIG_HZ .kconfig map is used.
 * 2. In bictcp_update(), calculation is changed to use usec
 *    resolution (i.e. USEC_PER_JIFFY) instead of using jiffies.
 *    Thus, usecs_to_jiffies() is not used in the bpf_cubic.c.
 * 3. In bictcp_update() [under tcp_friendliness], the original
 *    "while (ca->ack_cnt > delta)" loop is changed to the equivalent
 *    "ca->ack_cnt / delta" operation.
 */
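/* For example, if ca->ack_cnt were 10 * delta + r (0 < r < delta), the
 * original loop would iterate 10 times, each pass subtracting delta and
 * bumping tcp_cwnd by one; computing n = ca->ack_cnt / delta once gives
 * the same result without a loop the verifier would have to bound.
 */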

#include "bpf_tracing_net.h"
#include <bpf/bpf_tracing.h>
#include <errno.h>

char _license[] SEC("license") = "GPL";

#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)

extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) __ksym;

#define BICTCP_BETA_SCALE	1024	/* Scale factor beta calculation
					 * max_cwnd = snd_cwnd * beta
					 */
#define BICTCP_HZ		10	/* BIC HZ 2^10 = 1024 */

/* Two methods of hybrid slow start */
#define HYSTART_ACK_TRAIN	0x1
#define HYSTART_DELAY		0x2

/* Number of delay samples for detecting the increase of delay */
#define HYSTART_MIN_SAMPLES	8
#define HYSTART_DELAY_MIN	(4000U)		/* 4 ms */
#define HYSTART_DELAY_MAX	(16000U)	/* 16 ms */
#define HYSTART_DELAY_THRESH(x)	clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
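/* e.g. with ca->delay_min = 24000 us, delay_min >> 3 = 3000 us is clamped
 * up to HYSTART_DELAY_MIN, so the threshold addend used in
 * hystart_update() is 4000 us.
 */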

static int fast_convergence = 1;
static const int beta = 717;	/* = 717/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh;
static const int bic_scale = 41;
static int tcp_friendliness = 1;

static int hystart = 1;
static int hystart_detect = HYSTART_ACK_TRAIN | HYSTART_DELAY;
static int hystart_low_window = 16;
static int hystart_ack_delta_us = 2000;

static const __u32 cube_rtt_scale = (bic_scale * 10);	/* 1024*c/rtt */
static const __u32 beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
				/ (BICTCP_BETA_SCALE - beta);
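/* With beta = 717 the expression above evaluates, in integer arithmetic,
 * to 8*1741/3/307 = 15, so the tcp_friendliness block in bictcp_update()
 * ends up using delta = (cwnd * 15) >> 3.
 */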
/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
 * so K = cubic_root( (wmax-cwnd)*rtt/c )
 * the unit of K is bictcp_HZ=2^10, not HZ
 *
 * c = bic_scale >> 10
 * rtt = 100ms
 *
 * the following code has been designed and tested for
 * cwnd < 1 million packets
 * RTT < 100 seconds
 * HZ < 100,000,000 (corresponding to a 10 nanosecond jiffy)
 */

/* 1/c * 2^2*bictcp_HZ * srtt, 2^40 */
static const __u64 cube_factor = (__u64)(1ull << (10+3*BICTCP_HZ))
				/ (bic_scale * 10);
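/* Numerically, cube_factor = 2^40 / 410 ~= 2.68e9.  As a rough unit
 * check: with last_max_cwnd - cwnd = 100 packets, bictcp_update()
 * computes bic_K = cubic_root(2.68e9 * 100) ~= 6450 in units of 2^-10
 * seconds, i.e. K ~= 6.3 s back to the origin point.
 */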

/* BIC TCP Parameters */
struct bpf_bictcp {
	__u32 cnt;		/* increase cwnd by 1 after ACKs */
	__u32 last_max_cwnd;	/* last maximum snd_cwnd */
	__u32 last_cwnd;	/* the last snd_cwnd */
	__u32 last_time;	/* time when updated last_cwnd */
	__u32 bic_origin_point;	/* origin point of bic function */
	__u32 bic_K;		/* time to origin point
				 * from the beginning of the current epoch
				 */
	__u32 delay_min;	/* min delay (usec) */
	__u32 epoch_start;	/* beginning of an epoch */
	__u32 ack_cnt;		/* number of acks */
	__u32 tcp_cwnd;		/* estimated tcp cwnd */
	__u16 unused;
	__u8 sample_cnt;	/* number of samples to decide curr_rtt */
	__u8 found;		/* the exit point is found? */
	__u32 round_start;	/* beginning of each round */
	__u32 end_seq;		/* end_seq of the round */
	__u32 last_ack;		/* last time when the ACK spacing is close */
	__u32 curr_rtt;		/* the minimum rtt of current round */
};

static void bictcp_reset(struct bpf_bictcp *ca)
{
	ca->cnt = 0;
	ca->last_max_cwnd = 0;
	ca->last_cwnd = 0;
	ca->last_time = 0;
	ca->bic_origin_point = 0;
	ca->bic_K = 0;
	ca->delay_min = 0;
	ca->epoch_start = 0;
	ca->ack_cnt = 0;
	ca->tcp_cwnd = 0;
	ca->found = 0;
}

extern unsigned long CONFIG_HZ __kconfig;
#define HZ CONFIG_HZ
#define USEC_PER_MSEC	1000UL
#define USEC_PER_SEC	1000000UL
#define USEC_PER_JIFFY	(USEC_PER_SEC / HZ)
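/* e.g. a kernel built with CONFIG_HZ=250 gives USEC_PER_JIFFY = 4000
 * (a 4 ms jiffy); CONFIG_HZ=1000 gives a 1 ms jiffy.
 */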

static __u64 div64_u64(__u64 dividend, __u64 divisor)
{
	return dividend / divisor;
}

#define div64_ul div64_u64

#define BITS_PER_U64 (sizeof(__u64) * 8)
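/* find last (most significant) set bit, 1-based: fls64(0) == 0,
 * fls64(1) == 1, fls64(1ULL << 63) == 64.  A branchy binary search
 * standing in for the kernel's fls64() helper.
 */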
static int fls64(__u64 x)
{
	int num = BITS_PER_U64 - 1;

	if (x == 0)
		return 0;

	if (!(x & (~0ull << (BITS_PER_U64-32)))) {
		num -= 32;
		x <<= 32;
	}
	if (!(x & (~0ull << (BITS_PER_U64-16)))) {
		num -= 16;
		x <<= 16;
	}
	if (!(x & (~0ull << (BITS_PER_U64-8)))) {
		num -= 8;
		x <<= 8;
	}
	if (!(x & (~0ull << (BITS_PER_U64-4)))) {
		num -= 4;
		x <<= 4;
	}
	if (!(x & (~0ull << (BITS_PER_U64-2)))) {
		num -= 2;
		x <<= 2;
	}
	if (!(x & (~0ull << (BITS_PER_U64-1))))
		num -= 1;

	return num + 1;
}

static __u32 bictcp_clock_us(const struct sock *sk)
{
	return tcp_sk(sk)->tcp_mstamp;
}

static void bictcp_hystart_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bpf_bictcp *ca = inet_csk_ca(sk);

	ca->round_start = ca->last_ack = bictcp_clock_us(sk);
	ca->end_seq = tp->snd_nxt;
	ca->curr_rtt = ~0U;
	ca->sample_cnt = 0;
}

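/* Flags flipped when bpf_setsockopt(TCP_NODELAY) is unexpectedly
 * rejected; they are global so the selftest can presumably check them
 * from user space.
 */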
bool nodelay_init_reject = false;
bool nodelay_cwnd_event_tx_start_reject = false;

SEC("struct_ops")
void BPF_PROG(bpf_cubic_init, struct sock *sk)
{
	struct bpf_bictcp *ca = inet_csk_ca(sk);
	int true_val = 1, ret;

	ret = bpf_setsockopt(sk, SOL_TCP, TCP_NODELAY, &true_val, sizeof(true_val));
	if (ret == -EOPNOTSUPP)
		nodelay_init_reject = true;

	bictcp_reset(ca);

	if (hystart)
		bictcp_hystart_reset(sk);

	if (!hystart && initial_ssthresh)
		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}

SEC("struct_ops")
void BPF_PROG(bpf_cubic_cwnd_event_tx_start, struct sock *sk)
{
	struct bpf_bictcp *ca = inet_csk_ca(sk);
	__u32 now = tcp_jiffies32;
	int true_val = 1, ret;
	__s32 delta;

	ret = bpf_setsockopt(sk, SOL_TCP, TCP_NODELAY, &true_val, sizeof(true_val));
	if (ret == -EOPNOTSUPP)
		nodelay_cwnd_event_tx_start_reject = true;

	delta = now - tcp_sk(sk)->lsndtime;

	/* We were application limited (idle) for a while.
	 * Shift epoch_start to keep cwnd growth to cubic curve.
	 */
	if (ca->epoch_start && delta > 0) {
		ca->epoch_start += delta;
		if (after(ca->epoch_start, now))
			ca->epoch_start = now;
	}
}

/*
 * cbrt(x) MSB values for x MSB values in [0..63].
 * Precomputed then refined by hand - Willy Tarreau
 *
 * For x in [0..63],
 *   v = cbrt(x << 18) - 1
 *   cbrt(x) = (v[x] + 10) >> 6
 */
static const __u8 v[] = {
	/* 0x00 */    0,   54,   54,   54,  118,  118,  118,  118,
	/* 0x08 */  123,  129,  134,  138,  143,  147,  151,  156,
	/* 0x10 */  157,  161,  164,  168,  170,  173,  176,  179,
	/* 0x18 */  181,  185,  187,  190,  192,  194,  197,  199,
	/* 0x20 */  200,  202,  204,  206,  209,  211,  213,  215,
	/* 0x28 */  217,  219,  221,  222,  224,  225,  227,  229,
	/* 0x30 */  231,  232,  234,  236,  237,  239,  240,  242,
	/* 0x38 */  244,  245,  246,  248,  250,  251,  252,  254,
};

/* calculate the cubic root of x using a table lookup followed by one
 * Newton-Raphson iteration.
 * Avg err ~= 0.195%
 */
static __u32 cubic_root(__u64 a)
{
	__u32 x, b, shift;

	if (a < 64) {
		/* a in [0..63] */
		return ((__u32)v[(__u32)a] + 35) >> 6;
	}

	b = fls64(a);
	b = ((b * 84) >> 8) - 1;
	shift = (a >> (b * 3));

	/* it is needed for verifier's bound check on v */
	if (shift >= 64)
		return 0;

	x = ((__u32)(((__u32)v[shift] + 10) << b)) >> 6;

	/*
	 * Newton-Raphson iteration
	 *                         2
	 * x    = ( 2 * x  +  a / x  ) / 3
	 *  k+1          k         k
	 */
	x = (2 * x + (__u32)div64_u64(a, (__u64)x * (__u64)(x - 1)));
	x = ((x * 341) >> 10);
	return x;
}
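/* sanity check of the fixed-point root: cubic_root(1ULL << 30) works out
 * to exactly 1024 (2^10), and cubic_root(1000) to 10.
 */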

/*
 * Compute congestion window to use.
 */
static void bictcp_update(struct bpf_bictcp *ca, __u32 cwnd, __u32 acked)
{
	__u32 delta, bic_target, max_cnt;
	__u64 offs, t;

	ca->ack_cnt += acked;	/* count the number of ACKed packets */

	if (ca->last_cwnd == cwnd &&
	    (__s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
		return;

	/* The CUBIC function can update ca->cnt at most once per jiffy.
	 * On all cwnd reduction events, ca->epoch_start is set to 0,
	 * which will force a recalculation of ca->cnt.
	 */
	if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
		goto tcp_friendliness;

	ca->last_cwnd = cwnd;
	ca->last_time = tcp_jiffies32;

	if (ca->epoch_start == 0) {
		ca->epoch_start = tcp_jiffies32;	/* record beginning */
		ca->ack_cnt = acked;			/* start counting */
		ca->tcp_cwnd = cwnd;			/* sync with cubic */

		if (ca->last_max_cwnd <= cwnd) {
			ca->bic_K = 0;
			ca->bic_origin_point = cwnd;
		} else {
			/* Compute new K based on
			 * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
			 */
			ca->bic_K = cubic_root(cube_factor
					       * (ca->last_max_cwnd - cwnd));
			ca->bic_origin_point = ca->last_max_cwnd;
		}
	}

	/* cubic function - calc */
	/* calculate c * time^3 / rtt,
	 * while considering overflow in calculation of time^3
	 * (so time^3 is done by using 64 bit)
	 * and without the support of division of 64bit numbers
	 * (so all divisions are done by using 32 bit)
	 * also NOTE the unit of those variables
	 *	time = (t - K) / 2^bictcp_HZ
	 *	c = bic_scale >> 10
	 *	rtt = (srtt >> 3) / HZ
	 * !!! The following code does not have overflow problems,
	 * if the cwnd < 1 million packets !!!
	 */

	t = (__s32)(tcp_jiffies32 - ca->epoch_start) * USEC_PER_JIFFY;
	t += ca->delay_min;
	/* change the unit from usec to bictcp_HZ */
	t <<= BICTCP_HZ;
	t /= USEC_PER_SEC;
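	/* e.g. with CONFIG_HZ=1000, 20 jiffies since epoch_start and
	 * delay_min = 10000 us: t = ((20000 + 10000) << 10) / 1e6 = 30,
	 * i.e. ~29 ms expressed in 2^-10 second (bictcp_HZ) units.
	 */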

	if (t < ca->bic_K)		/* t - K */
		offs = ca->bic_K - t;
	else
		offs = t - ca->bic_K;

	/* c/rtt * (t-K)^3 */
	delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
	if (t < ca->bic_K)		/* below origin */
		bic_target = ca->bic_origin_point - delta;
	else				/* above origin */
		bic_target = ca->bic_origin_point + delta;

	/* cubic function - calc bictcp_cnt */
	if (bic_target > cwnd) {
		ca->cnt = cwnd / (bic_target - cwnd);
	} else {
		ca->cnt = 100 * cwnd;	/* very small increment */
	}

	/*
	 * The initial growth of cubic function may be too conservative
	 * when the available bandwidth is still unknown.
	 */
	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
		ca->cnt = 20;	/* increase cwnd 5% per RTT */

tcp_friendliness:
	/* TCP Friendly */
	if (tcp_friendliness) {
		__u32 scale = beta_scale;
		__u32 n;

		/* update tcp cwnd */
		delta = (cwnd * scale) >> 3;
		if (ca->ack_cnt > delta && delta) {
			n = ca->ack_cnt / delta;
			ca->ack_cnt -= n * delta;
			ca->tcp_cwnd += n;
		}

		if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
			delta = ca->tcp_cwnd - cwnd;
			max_cnt = cwnd / delta;
			if (ca->cnt > max_cnt)
				ca->cnt = max_cnt;
		}
	}

	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
	 */
	ca->cnt = max(ca->cnt, 2U);
}

SEC("struct_ops")
void BPF_PROG(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bpf_bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		if (hystart && after(ack, ca->end_seq))
			bictcp_hystart_reset(sk);
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	bictcp_update(ca, tp->snd_cwnd, acked);
	tcp_cong_avoid_ai(tp, ca->cnt, acked);
}

SEC("struct_ops")
__u32 BPF_PROG(bpf_cubic_recalc_ssthresh, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bpf_bictcp *ca = inet_csk_ca(sk);

	ca->epoch_start = 0;	/* end of epoch */

	/* Wmax and fast convergence */
	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
			/ (2 * BICTCP_BETA_SCALE);
	else
		ca->last_max_cwnd = tp->snd_cwnd;

	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
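/* e.g. on a loss event with snd_cwnd = 100: ssthresh becomes
 * 100 * 717 / 1024 = 70 (a ~30% backoff), and with fast convergence
 * last_max_cwnd is set to 100 * (1024+717) / 2048 = 85 rather than 100.
 */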

SEC("struct_ops")
void BPF_PROG(bpf_cubic_state, struct sock *sk, __u8 new_state)
{
	if (new_state == TCP_CA_Loss) {
		bictcp_reset(inet_csk_ca(sk));
		bictcp_hystart_reset(sk);
	}
}

#define GSO_MAX_SIZE 65536

/* Account for TSO/GRO delays.
 * Otherwise short RTT flows could get too small ssthresh, since during
 * slow start we begin with small TSO packets and ca->delay_min would
 * not account for long aggregation delay when TSO packets get bigger.
 * Ideally even with a very small RTT we would like to have at least one
 * TSO packet being sent and received by GRO, and another one in qdisc layer.
 * We apply another 100% factor because @rate is doubled at this point.
 * We cap the cushion to 1ms.
 */
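/* e.g. at a pacing rate of 125 MB/s (~1 Gbit/s) the raw cushion is
 * 65536 * 4 * 1e6 / 125e6 ~= 2097 us, which the min() below caps at
 * USEC_PER_MSEC (1000 us).
 */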
static __u32 hystart_ack_delay(struct sock *sk)
{
	unsigned long rate;

	rate = sk->sk_pacing_rate;
	if (!rate)
		return 0;
	return min((__u64)USEC_PER_MSEC,
		   div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
}

static void hystart_update(struct sock *sk, __u32 delay)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bpf_bictcp *ca = inet_csk_ca(sk);
	__u32 threshold;

	if (hystart_detect & HYSTART_ACK_TRAIN) {
		__u32 now = bictcp_clock_us(sk);

		/* first detection parameter - ack-train detection */
		if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
			ca->last_ack = now;

			threshold = ca->delay_min + hystart_ack_delay(sk);

			/* Hystart ack train triggers if we get ack past
			 * ca->delay_min/2.
			 * Pacing might have delayed packets up to RTT/2
			 * during slow start.
			 */
			if (sk->sk_pacing_status == SK_PACING_NONE)
				threshold >>= 1;

			if ((__s32)(now - ca->round_start) > threshold) {
				ca->found = 1;
				tp->snd_ssthresh = tp->snd_cwnd;
			}
		}
	}

	if (hystart_detect & HYSTART_DELAY) {
		/* obtain the minimum delay seen among the first
		 * HYSTART_MIN_SAMPLES packets of this round
		 */
		if (ca->curr_rtt > delay)
			ca->curr_rtt = delay;
		if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
			ca->sample_cnt++;
		} else {
			if (ca->curr_rtt > ca->delay_min +
			    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
				ca->found = 1;
				tp->snd_ssthresh = tp->snd_cwnd;
			}
		}
	}
}

/* set once pkts_acked has run; the selftest checks it from user space */
int bpf_cubic_acked_called = 0;

SEC("struct_ops")
void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bpf_bictcp *ca = inet_csk_ca(sk);
	__u32 delay;

	bpf_cubic_acked_called = 1;
	/* Some calls are for duplicates without timestamps */
	if (sample->rtt_us < 0)
		return;

	/* Discard delay samples right after fast recovery */
	if (ca->epoch_start && (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
		return;

	delay = sample->rtt_us;
	if (delay == 0)
		delay = 1;

	/* first time call or link delay decreases */
	if (ca->delay_min == 0 || ca->delay_min > delay)
		ca->delay_min = delay;

	/* hystart triggers when cwnd is larger than some threshold */
	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
	    tp->snd_cwnd >= hystart_low_window)
		hystart_update(sk, delay);
}

extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;

SEC("struct_ops")
__u32 BPF_PROG(bpf_cubic_undo_cwnd, struct sock *sk)
{
	return tcp_reno_undo_cwnd(sk);
}

SEC(".struct_ops")
struct tcp_congestion_ops cubic = {
	.init		= (void *)bpf_cubic_init,
	.ssthresh	= (void *)bpf_cubic_recalc_ssthresh,
	.cong_avoid	= (void *)bpf_cubic_cong_avoid,
	.set_state	= (void *)bpf_cubic_state,
	.undo_cwnd	= (void *)bpf_cubic_undo_cwnd,
	.cwnd_event_tx_start = (void *)bpf_cubic_cwnd_event_tx_start,
	.pkts_acked	= (void *)bpf_cubic_acked,
	.name		= "bpf_cubic",
};
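/* A minimal user-space sketch for attaching this CC, assuming a libbpf
 * skeleton (bpf_cubic.skel.h) generated from this object file:
 *
 *	struct bpf_cubic *skel = bpf_cubic__open_and_load();
 *	struct bpf_link *link = bpf_map__attach_struct_ops(skel->maps.cubic);
 *
 * Once registered, setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 * "bpf_cubic", strlen("bpf_cubic")) selects it for a socket.
 */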