Lines Matching defs:ca

13 *    "while (ca->ack_cnt > delta)" loop is changed to the equivalent
14 *    "ca->ack_cnt / delta" operation.
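The conversion the header comment describes can be sketched side by side. The loop form below follows tcp_cubic.c (shown only to illustrate the equivalence, not copied from this file); the single-shot form is the one that appears later in this listing under the ack_cnt / delta lines:

    /* Original tcp_cubic.c form: iterates once per tcp_cwnd increment,
     * which the BPF verifier cannot bound.
     */
    while (ca->ack_cnt > delta) {
            ca->ack_cnt -= delta;
            ca->tcp_cwnd++;
    }

    /* Equivalent bounded form: one division replaces N loop iterations. */
    if (ca->ack_cnt > delta && delta) {
            __u32 n = ca->ack_cnt / delta;

            ca->ack_cnt -= n * delta;
            ca->tcp_cwnd += n;
    }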
95 static void bictcp_reset(struct bpf_bictcp *ca)
97 ca->cnt = 0;
98 ca->last_max_cwnd = 0;
99 ca->last_cwnd = 0;
100 ca->last_time = 0;
101 ca->bic_origin_point = 0;
102 ca->bic_K = 0;
103 ca->delay_min = 0;
104 ca->epoch_start = 0;
105 ca->ack_cnt = 0;
106 ca->tcp_cwnd = 0;
107 ca->found = 0;
165 struct bpf_bictcp *ca = inet_csk_ca(sk);
167 ca->round_start = ca->last_ack = bictcp_clock_us(sk);
168 ca->end_seq = tp->snd_nxt;
169 ca->curr_rtt = ~0U;
170 ca->sample_cnt = 0;
176 struct bpf_bictcp *ca = inet_csk_ca(sk);
178 bictcp_reset(ca);
191 struct bpf_bictcp *ca = inet_csk_ca(sk);
200 if (ca->epoch_start && delta > 0) {
201 ca->epoch_start += delta;
202 if (after(ca->epoch_start, now))
203 ca->epoch_start = now;
265 static void bictcp_update(struct bpf_bictcp *ca, __u32 cwnd, __u32 acked)
270 ca->ack_cnt += acked; /* count the number of ACKed packets */
272 if (ca->last_cwnd == cwnd &&
273 (__s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
276 /* The CUBIC function can update ca->cnt at most once per jiffy.
277 * On all cwnd reduction events, ca->epoch_start is set to 0,
278 * which will force a recalculation of ca->cnt.
280 if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
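Only the matching line of this guard is shown above; in tcp_cubic.c the statement skips the cubic recalculation and jumps straight to the TCP-friendliness step. A hedged sketch of the full statement, assuming the same label as tcp_cubic.c:

    /* At most one ca->cnt recalculation per jiffy while an epoch is
     * active; a cwnd reduction clears epoch_start and re-enables it.
     */
    if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
            goto tcp_friendliness;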
283 ca->last_cwnd = cwnd;
284 ca->last_time = tcp_jiffies32;
286 if (ca->epoch_start == 0) {
287 ca->epoch_start = tcp_jiffies32; /* record beginning */
288 ca->ack_cnt = acked; /* start counting */
289 ca->tcp_cwnd = cwnd; /* sync with cubic */
291 if (ca->last_max_cwnd <= cwnd) {
292 ca->bic_K = 0;
293 ca->bic_origin_point = cwnd;
298 ca->bic_K = cubic_root(cube_factor
299 * (ca->last_max_cwnd - cwnd));
300 ca->bic_origin_point = ca->last_max_cwnd;
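For orientation, bic_K and bic_origin_point parameterize the CUBIC window curve from the CUBIC paper / RFC 8312; the file's fixed-point constants differ, but the relationship is approximately:

    W(t) = C * (t - K)^3 + W_max
    K    = cbrt((W_max - cwnd) / C)

so cube_factor folds 1/C and the fixed-point scaling into one constant, and bic_K is the (scaled) time at which the curve climbs back to bic_origin_point = last_max_cwnd.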
318 t = (__s32)(tcp_jiffies32 - ca->epoch_start) * USEC_PER_JIFFY;
319 t += ca->delay_min;
324 if (t < ca->bic_K) /* t - K */
325 offs = ca->bic_K - t;
327 offs = t - ca->bic_K;
331 if (t < ca->bic_K) /* below origin */
332 bic_target = ca->bic_origin_point - delta;
334 bic_target = ca->bic_origin_point + delta;
338 ca->cnt = cwnd / (bic_target - cwnd);
340 ca->cnt = 100 * cwnd; /* very small increment */
347 if (ca->last_max_cwnd == 0 && ca->cnt > 20)
348 ca->cnt = 20; /* increase cwnd 5% per RTT */
358 if (ca->ack_cnt > delta && delta) {
359 n = ca->ack_cnt / delta;
360 ca->ack_cnt -= n * delta;
361 ca->tcp_cwnd += n;
364 if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */
365 delta = ca->tcp_cwnd - cwnd;
367 if (ca->cnt > max_cnt)
368 ca->cnt = max_cnt;
375 ca->cnt = max(ca->cnt, 2U);
382 struct bpf_bictcp *ca = inet_csk_ca(sk);
388 if (hystart && after(ack, ca->end_seq))
394 bictcp_update(ca, tp->snd_cwnd, acked);
395 tcp_cong_avoid_ai(tp, ca->cnt, acked);
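ca->cnt computed by bictcp_update() is the additive-increase divisor consumed here: cwnd grows by roughly one segment for every ca->cnt ACKed segments. A simplified sketch of that consumption (not the kernel's exact tcp_cong_avoid_ai(), which also handles cwnd clamping):

    static void cong_avoid_ai_sketch(struct tcp_sock *tp, __u32 w, __u32 acked)
    {
            /* Accumulate acked segments; every w of them buys one cwnd unit. */
            tp->snd_cwnd_cnt += acked;
            if (tp->snd_cwnd_cnt >= w) {
                    __u32 delta = tp->snd_cwnd_cnt / w;

                    tp->snd_cwnd_cnt -= delta * w;
                    tp->snd_cwnd += delta;
            }
    }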
402 struct bpf_bictcp *ca = inet_csk_ca(sk);
404 ca->epoch_start = 0; /* end of epoch */
407 if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
408 ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
411 ca->last_max_cwnd = tp->snd_cwnd;
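Worked example for the fast-convergence branch above, assuming beta = 717 and BICTCP_BETA_SCALE = 1024 as in tcp_cubic.c:

    /* Losing at snd_cwnd = 100 while still below the previous maximum
     * stores last_max_cwnd = 100 * (1024 + 717) / (2 * 1024) = 85,
     * i.e. ~0.85 * cwnd rather than the full cwnd, so a flow that keeps
     * backing off cedes room to competing flows sooner.
     */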
429 * slow start we begin with small TSO packets and ca->delay_min would
450 struct bpf_bictcp *ca = inet_csk_ca(sk);
457 if ((__s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
458 ca->last_ack = now;
460 threshold = ca->delay_min + hystart_ack_delay(sk);
463 * ca->delay_min/2.
470 if ((__s32)(now - ca->round_start) > threshold) {
471 ca->found = 1;
479 if (ca->curr_rtt > delay)
480 ca->curr_rtt = delay;
481 if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
482 ca->sample_cnt++;
484 if (ca->curr_rtt > ca->delay_min +
485 HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
486 ca->found = 1;
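Worked example for the delay-based HyStart exit above, assuming HYSTART_DELAY_THRESH() clamps its argument to [4000, 16000] usec as in tcp_cubic.c:

    /* With a baseline delay_min of 8000us (8 ms):
     *   delay_min >> 3 = 1000us, clamped up to 4000us
     *   exit threshold = 8000us + 4000us = 12000us
     * so found is set once a round's minimum RTT sample exceeds 12 ms
     * while still in slow start.
     */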
499 struct bpf_bictcp *ca = inet_csk_ca(sk);
508 if (ca->epoch_start && (__s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
516 if (ca->delay_min == 0 || ca->delay_min > delay)
517 ca->delay_min = delay;
520 if (!ca->found && tcp_in_slow_start(tp) && hystart &&