1 #include "internal/quic_cc.h" 2 #include "internal/quic_types.h" 3 #include "internal/safe_math.h" 4 5 OSSL_SAFE_MATH_UNSIGNED(u64, uint64_t) 6 7 typedef struct ossl_cc_newreno_st { 8 /* Dependencies. */ 9 OSSL_TIME (*now_cb)(void *arg); 10 void *now_cb_arg; 11 12 /* 'Constants' (which we allow to be configurable). */ 13 uint64_t k_init_wnd, k_min_wnd; 14 uint32_t k_loss_reduction_factor_num, k_loss_reduction_factor_den; 15 uint32_t persistent_cong_thresh; 16 17 /* State. */ 18 size_t max_dgram_size; 19 uint64_t bytes_in_flight, cong_wnd, slow_start_thresh, bytes_acked; 20 OSSL_TIME cong_recovery_start_time; 21 22 /* Unflushed state during multiple on-loss calls. */ 23 int processing_loss; /* 1 if not flushed */ 24 OSSL_TIME tx_time_of_last_loss; 25 26 /* Diagnostic state. */ 27 int in_congestion_recovery; 28 29 /* Diagnostic output locations. */ 30 size_t *p_diag_max_dgram_payload_len; 31 uint64_t *p_diag_cur_cwnd_size; 32 uint64_t *p_diag_min_cwnd_size; 33 uint64_t *p_diag_cur_bytes_in_flight; 34 uint32_t *p_diag_cur_state; 35 } OSSL_CC_NEWRENO; 36 37 #define MIN_MAX_INIT_WND_SIZE 14720 /* RFC 9002 s. 7.2 */ 38 39 /* TODO(QUIC FUTURE): Pacing support. */ 40 41 static void newreno_set_max_dgram_size(OSSL_CC_NEWRENO *nr, 42 size_t max_dgram_size); 43 static void newreno_update_diag(OSSL_CC_NEWRENO *nr); 44 45 static void newreno_reset(OSSL_CC_DATA *cc); 46 47 static OSSL_CC_DATA *newreno_new(OSSL_TIME (*now_cb)(void *arg), 48 void *now_cb_arg) 49 { 50 OSSL_CC_NEWRENO *nr; 51 52 if ((nr = OPENSSL_zalloc(sizeof(*nr))) == NULL) 53 return NULL; 54 55 nr->now_cb = now_cb; 56 nr->now_cb_arg = now_cb_arg; 57 58 newreno_set_max_dgram_size(nr, QUIC_MIN_INITIAL_DGRAM_LEN); 59 newreno_reset((OSSL_CC_DATA *)nr); 60 61 return (OSSL_CC_DATA *)nr; 62 } 63 64 static void newreno_free(OSSL_CC_DATA *cc) 65 { 66 OPENSSL_free(cc); 67 } 68 69 static void newreno_set_max_dgram_size(OSSL_CC_NEWRENO *nr, 70 size_t max_dgram_size) 71 { 72 size_t max_init_wnd; 73 int is_reduced = (max_dgram_size < nr->max_dgram_size); 74 75 nr->max_dgram_size = max_dgram_size; 76 77 max_init_wnd = 2 * max_dgram_size; 78 if (max_init_wnd < MIN_MAX_INIT_WND_SIZE) 79 max_init_wnd = MIN_MAX_INIT_WND_SIZE; 80 81 nr->k_init_wnd = 10 * max_dgram_size; 82 if (nr->k_init_wnd > max_init_wnd) 83 nr->k_init_wnd = max_init_wnd; 84 85 nr->k_min_wnd = 2 * max_dgram_size; 86 87 if (is_reduced) 88 nr->cong_wnd = nr->k_init_wnd; 89 90 newreno_update_diag(nr); 91 } 92 93 static void newreno_reset(OSSL_CC_DATA *cc) 94 { 95 OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc; 96 97 nr->k_loss_reduction_factor_num = 1; 98 nr->k_loss_reduction_factor_den = 2; 99 nr->persistent_cong_thresh = 3; 100 101 nr->cong_wnd = nr->k_init_wnd; 102 nr->bytes_in_flight = 0; 103 nr->bytes_acked = 0; 104 nr->slow_start_thresh = UINT64_MAX; 105 nr->cong_recovery_start_time = ossl_time_zero(); 106 107 nr->processing_loss = 0; 108 nr->tx_time_of_last_loss = ossl_time_zero(); 109 nr->in_congestion_recovery = 0; 110 } 111 112 static int newreno_set_input_params(OSSL_CC_DATA *cc, const OSSL_PARAM *params) 113 { 114 OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc; 115 const OSSL_PARAM *p; 116 size_t value; 117 118 p = OSSL_PARAM_locate_const(params, OSSL_CC_OPTION_MAX_DGRAM_PAYLOAD_LEN); 119 if (p != NULL) { 120 if (!OSSL_PARAM_get_size_t(p, &value)) 121 return 0; 122 if (value < QUIC_MIN_INITIAL_DGRAM_LEN) 123 return 0; 124 125 newreno_set_max_dgram_size(nr, value); 126 } 127 128 return 1; 129 } 130 131 static int bind_diag(OSSL_PARAM *params, const char *param_name, size_t len, 132 
static int bind_diag(OSSL_PARAM *params, const char *param_name, size_t len,
                     void **pp)
{
    const OSSL_PARAM *p = OSSL_PARAM_locate_const(params, param_name);

    *pp = NULL;

    if (p == NULL)
        return 1;

    if (p->data_type != OSSL_PARAM_UNSIGNED_INTEGER
        || p->data_size != len)
        return 0;

    *pp = p->data;
    return 1;
}

static int newreno_bind_diagnostic(OSSL_CC_DATA *cc, OSSL_PARAM *params)
{
    OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
    size_t *new_p_max_dgram_payload_len;
    uint64_t *new_p_cur_cwnd_size;
    uint64_t *new_p_min_cwnd_size;
    uint64_t *new_p_cur_bytes_in_flight;
    uint32_t *new_p_cur_state;

    if (!bind_diag(params, OSSL_CC_OPTION_MAX_DGRAM_PAYLOAD_LEN,
                   sizeof(size_t), (void **)&new_p_max_dgram_payload_len)
        || !bind_diag(params, OSSL_CC_OPTION_CUR_CWND_SIZE,
                      sizeof(uint64_t), (void **)&new_p_cur_cwnd_size)
        || !bind_diag(params, OSSL_CC_OPTION_MIN_CWND_SIZE,
                      sizeof(uint64_t), (void **)&new_p_min_cwnd_size)
        || !bind_diag(params, OSSL_CC_OPTION_CUR_BYTES_IN_FLIGHT,
                      sizeof(uint64_t), (void **)&new_p_cur_bytes_in_flight)
        || !bind_diag(params, OSSL_CC_OPTION_CUR_STATE,
                      sizeof(uint32_t), (void **)&new_p_cur_state))
        return 0;

    if (new_p_max_dgram_payload_len != NULL)
        nr->p_diag_max_dgram_payload_len = new_p_max_dgram_payload_len;

    if (new_p_cur_cwnd_size != NULL)
        nr->p_diag_cur_cwnd_size = new_p_cur_cwnd_size;

    if (new_p_min_cwnd_size != NULL)
        nr->p_diag_min_cwnd_size = new_p_min_cwnd_size;

    if (new_p_cur_bytes_in_flight != NULL)
        nr->p_diag_cur_bytes_in_flight = new_p_cur_bytes_in_flight;

    if (new_p_cur_state != NULL)
        nr->p_diag_cur_state = new_p_cur_state;

    newreno_update_diag(nr);
    return 1;
}

static void unbind_diag(OSSL_PARAM *params, const char *param_name,
                        void **pp)
{
    const OSSL_PARAM *p = OSSL_PARAM_locate_const(params, param_name);

    if (p != NULL)
        *pp = NULL;
}

static int newreno_unbind_diagnostic(OSSL_CC_DATA *cc, OSSL_PARAM *params)
{
    OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;

    unbind_diag(params, OSSL_CC_OPTION_MAX_DGRAM_PAYLOAD_LEN,
                (void **)&nr->p_diag_max_dgram_payload_len);
    unbind_diag(params, OSSL_CC_OPTION_CUR_CWND_SIZE,
                (void **)&nr->p_diag_cur_cwnd_size);
    unbind_diag(params, OSSL_CC_OPTION_MIN_CWND_SIZE,
                (void **)&nr->p_diag_min_cwnd_size);
    unbind_diag(params, OSSL_CC_OPTION_CUR_BYTES_IN_FLIGHT,
                (void **)&nr->p_diag_cur_bytes_in_flight);
    unbind_diag(params, OSSL_CC_OPTION_CUR_STATE,
                (void **)&nr->p_diag_cur_state);
    return 1;
}

static void newreno_update_diag(OSSL_CC_NEWRENO *nr)
{
    if (nr->p_diag_max_dgram_payload_len != NULL)
        *nr->p_diag_max_dgram_payload_len = nr->max_dgram_size;

    if (nr->p_diag_cur_cwnd_size != NULL)
        *nr->p_diag_cur_cwnd_size = nr->cong_wnd;

    if (nr->p_diag_min_cwnd_size != NULL)
        *nr->p_diag_min_cwnd_size = nr->k_min_wnd;

    if (nr->p_diag_cur_bytes_in_flight != NULL)
        *nr->p_diag_cur_bytes_in_flight = nr->bytes_in_flight;

    if (nr->p_diag_cur_state != NULL) {
        if (nr->in_congestion_recovery)
            *nr->p_diag_cur_state = 'R';
        else if (nr->cong_wnd < nr->slow_start_thresh)
            *nr->p_diag_cur_state = 'S';
        else
            *nr->p_diag_cur_state = 'A';
    }
}
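/*
 * Congestion response.
 *
 * newreno_cong() reacts to a congestion signal for a packet sent at tx_time.
 * If that packet was sent before the current recovery period began
 * (newreno_in_cong_recovery()), the signal is ignored; otherwise a new
 * recovery period starts, the slow start threshold is set to
 * cong_wnd * k_loss_reduction_factor (1/2 by default, see newreno_reset())
 * and the congestion window is reduced to that threshold, floored at
 * k_min_wnd. newreno_flush() applies this response at most once per batch of
 * losses and additionally collapses the window to k_min_wnd on persistent
 * congestion.
 */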
static int newreno_in_cong_recovery(OSSL_CC_NEWRENO *nr, OSSL_TIME tx_time)
{
    return ossl_time_compare(tx_time, nr->cong_recovery_start_time) <= 0;
}

static void newreno_cong(OSSL_CC_NEWRENO *nr, OSSL_TIME tx_time)
{
    int err = 0;

    /* No reaction if already in a recovery period. */
    if (newreno_in_cong_recovery(nr, tx_time))
        return;

    /* Start a new recovery period. */
    nr->in_congestion_recovery = 1;
    nr->cong_recovery_start_time = nr->now_cb(nr->now_cb_arg);

    /* slow_start_thresh = cong_wnd * loss_reduction_factor */
    nr->slow_start_thresh
        = safe_muldiv_u64(nr->cong_wnd,
                          nr->k_loss_reduction_factor_num,
                          nr->k_loss_reduction_factor_den,
                          &err);

    if (err)
        nr->slow_start_thresh = UINT64_MAX;

    nr->cong_wnd = nr->slow_start_thresh;
    if (nr->cong_wnd < nr->k_min_wnd)
        nr->cong_wnd = nr->k_min_wnd;
}

static void newreno_flush(OSSL_CC_NEWRENO *nr, uint32_t flags)
{
    if (!nr->processing_loss)
        return;

    newreno_cong(nr, nr->tx_time_of_last_loss);

    if ((flags & OSSL_CC_LOST_FLAG_PERSISTENT_CONGESTION) != 0) {
        nr->cong_wnd = nr->k_min_wnd;
        nr->cong_recovery_start_time = ossl_time_zero();
    }

    nr->processing_loss = 0;
    newreno_update_diag(nr);
}

static uint64_t newreno_get_tx_allowance(OSSL_CC_DATA *cc)
{
    OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;

    if (nr->bytes_in_flight >= nr->cong_wnd)
        return 0;

    return nr->cong_wnd - nr->bytes_in_flight;
}

static OSSL_TIME newreno_get_wakeup_deadline(OSSL_CC_DATA *cc)
{
    if (newreno_get_tx_allowance(cc) > 0) {
        /* We have TX allowance now so wakeup immediately */
        return ossl_time_zero();
    } else {
        /*
         * The NewReno congestion controller does not vary its state in time,
         * only in response to stimulus.
         */
        return ossl_time_infinite();
    }
}

static int newreno_on_data_sent(OSSL_CC_DATA *cc, uint64_t num_bytes)
{
    OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;

    nr->bytes_in_flight += num_bytes;
    newreno_update_diag(nr);
    return 1;
}

static int newreno_is_cong_limited(OSSL_CC_NEWRENO *nr)
{
    uint64_t wnd_rem;

    /* We are congestion-limited if we are already at the congestion window. */
    if (nr->bytes_in_flight >= nr->cong_wnd)
        return 1;

    wnd_rem = nr->cong_wnd - nr->bytes_in_flight;

    /*
     * Consider ourselves congestion-limited if less than three datagrams'
     * worth of congestion window remains to be spent, or if we are in slow
     * start and have consumed half of our window.
     */
    return (nr->cong_wnd < nr->slow_start_thresh && wnd_rem <= nr->cong_wnd / 2)
        || wnd_rem <= 3 * nr->max_dgram_size;
}

static int newreno_on_data_acked(OSSL_CC_DATA *cc,
                                 const OSSL_CC_ACK_INFO *info)
{
    OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;

    /*
     * Packet has been acked. Firstly, remove it from the aggregate count of
     * bytes in flight.
     */
    nr->bytes_in_flight -= info->tx_size;

    /*
     * We use acknowledgement of data as a signal that we are not at channel
     * capacity and that it may be reasonable to increase the congestion
     * window. However, acknowledgement is not a useful signal that there is
     * further capacity if we are not actually saturating the congestion
     * window that we already have (for example, if the application is not
     * generating much data or we are limited by flow control). Therefore, we
     * only expand the congestion window if we are consuming a significant
     * fraction of the congestion window.
     */
    if (!newreno_is_cong_limited(nr))
        goto out;

    /*
     * We can handle acknowledgement of a packet in one of three ways
     * depending on our current state:
     *
     *   - Congestion Recovery: Do nothing. We don't start increasing
     *     the congestion window in response to acknowledgements until
     *     we are no longer in the Congestion Recovery state.
     *
     *   - Slow Start: Increase the congestion window using the slow
     *     start scale.
     *
     *   - Congestion Avoidance: Increase the congestion window using
     *     the congestion avoidance scale.
     */
    if (newreno_in_cong_recovery(nr, info->tx_time)) {
        /* Congestion recovery, do nothing. */
    } else if (nr->cong_wnd < nr->slow_start_thresh) {
        /* When this condition is true we are in the Slow Start state. */
        nr->cong_wnd += info->tx_size;
        nr->in_congestion_recovery = 0;
    } else {
        /* Otherwise, we are in the Congestion Avoidance state. */
        nr->bytes_acked += info->tx_size;

        /*
         * Avoid integer division as per RFC 9002 s. B.5. / RFC3465 s. 2.1.
         */
        if (nr->bytes_acked >= nr->cong_wnd) {
            nr->bytes_acked -= nr->cong_wnd;
            nr->cong_wnd += nr->max_dgram_size;
        }

        nr->in_congestion_recovery = 0;
    }

out:
    newreno_update_diag(nr);
    return 1;
}
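/*
 * Loss handling is batched: each newreno_on_data_lost() call removes the lost
 * packet from bytes_in_flight and records loss metadata (processing_loss,
 * tx_time_of_last_loss, and cancellation of any pending window increase via
 * bytes_acked). The congestion window reaction itself is deferred to
 * newreno_flush(), invoked from newreno_on_data_lost_finished() once per
 * batch.
 */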
static int newreno_on_data_lost(OSSL_CC_DATA *cc,
                                const OSSL_CC_LOSS_INFO *info)
{
    OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;

    if (info->tx_size > nr->bytes_in_flight)
        return 0;

    nr->bytes_in_flight -= info->tx_size;

    if (!nr->processing_loss) {

        if (ossl_time_compare(info->tx_time, nr->tx_time_of_last_loss) <= 0)
            /*
             * After triggering congestion due to a lost packet at time t,
             * don't trigger congestion again due to any subsequently detected
             * lost packet at a time s < t, as we've effectively already
             * signalled congestion on loss of that and subsequent packets.
             */
            goto out;

        nr->processing_loss = 1;

        /*
         * Cancel any pending window increase in the Congestion Avoidance
         * state.
         */
        nr->bytes_acked = 0;
    }

    nr->tx_time_of_last_loss
        = ossl_time_max(nr->tx_time_of_last_loss, info->tx_time);

out:
    newreno_update_diag(nr);
    return 1;
}

static int newreno_on_data_lost_finished(OSSL_CC_DATA *cc, uint32_t flags)
{
    OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;

    newreno_flush(nr, flags);
    return 1;
}

static int newreno_on_data_invalidated(OSSL_CC_DATA *cc,
                                       uint64_t num_bytes)
{
    OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;

    nr->bytes_in_flight -= num_bytes;
    newreno_update_diag(nr);
    return 1;
}

static int newreno_on_ecn(OSSL_CC_DATA *cc,
                          const OSSL_CC_ECN_INFO *info)
{
    OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;

    nr->processing_loss = 1;
    nr->bytes_acked = 0;
    nr->tx_time_of_last_loss = info->largest_acked_time;
    newreno_flush(nr, 0);
    return 1;
}

const OSSL_CC_METHOD ossl_cc_newreno_method = {
    newreno_new,
    newreno_free,
    newreno_reset,
    newreno_set_input_params,
    newreno_bind_diagnostic,
    newreno_unbind_diagnostic,
    newreno_get_tx_allowance,
    newreno_get_wakeup_deadline,
    newreno_on_data_sent,
    newreno_on_data_acked,
    newreno_on_data_lost,
    newreno_on_data_lost_finished,
    newreno_on_data_invalidated,
    newreno_on_ecn,
};
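/*
 * Illustrative call sequence (a sketch only, inferred from the functions
 * above; variable names are hypothetical and this is not normative
 * documentation of the OSSL_CC interface):
 *
 *   cc = newreno_new(now_cb, now_cb_arg);
 *   ...
 *   allowance = newreno_get_tx_allowance(cc);   // bytes we may send now
 *   newreno_on_data_sent(cc, tx_size);          // after each transmission
 *   newreno_on_data_acked(cc, &ack_info);       // ack_info.tx_size/.tx_time
 *   newreno_on_data_lost(cc, &loss_info);       // per lost packet in a batch
 *   newreno_on_data_lost_finished(cc, flags);   // once per loss batch
 *   ...
 *   newreno_free(cc);
 */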