1 /* 2 * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved. 3 * 4 * Licensed under the Apache License 2.0 (the "License"). You may not use 5 * this file except in compliance with the License. You can obtain a copy 6 * in the file LICENSE in the source distribution or at 7 * https://www.openssl.org/source/license.html 8 */ 9 10 #include "internal/quic_txp.h" 11 #include "internal/quic_fifd.h" 12 #include "internal/quic_stream_map.h" 13 #include "internal/quic_error.h" 14 #include "internal/common.h" 15 #include <openssl/err.h> 16 17 #define MIN_CRYPTO_HDR_SIZE 3 18 19 #define MIN_FRAME_SIZE_HANDSHAKE_DONE 1 20 #define MIN_FRAME_SIZE_MAX_DATA 2 21 #define MIN_FRAME_SIZE_ACK 5 22 #define MIN_FRAME_SIZE_CRYPTO (MIN_CRYPTO_HDR_SIZE + 1) 23 #define MIN_FRAME_SIZE_STREAM 3 /* minimum useful size (for non-FIN) */ 24 #define MIN_FRAME_SIZE_MAX_STREAMS_BIDI 2 25 #define MIN_FRAME_SIZE_MAX_STREAMS_UNI 2 26 27 /* 28 * Packet Archetypes 29 * ================= 30 */ 31 32 /* Generate normal packets containing most frame types, subject to EL. */ 33 #define TX_PACKETISER_ARCHETYPE_NORMAL 0 34 35 /* 36 * A probe packet is different in that: 37 * - It bypasses CC, but *is* counted as in flight for purposes of CC; 38 * - It must be ACK-eliciting. 39 */ 40 #define TX_PACKETISER_ARCHETYPE_PROBE 1 41 42 /* 43 * An ACK-only packet is different in that: 44 * - It bypasses CC, and is considered a 'non-inflight' packet; 45 * - It may not contain anything other than an ACK frame, not even padding. 46 */ 47 #define TX_PACKETISER_ARCHETYPE_ACK_ONLY 2 48 49 #define TX_PACKETISER_ARCHETYPE_NUM 3 50 51 struct ossl_quic_tx_packetiser_st { 52 OSSL_QUIC_TX_PACKETISER_ARGS args; 53 54 /* 55 * Opaque initial token blob provided by caller. TXP frees using the 56 * callback when it is no longer needed. 57 */ 58 const unsigned char *initial_token; 59 size_t initial_token_len; 60 ossl_quic_initial_token_free_fn *initial_token_free_cb; 61 void *initial_token_free_cb_arg; 62 63 /* Subcomponents of the TXP that we own. */ 64 QUIC_FIFD fifd; /* QUIC Frame-in-Flight Dispatcher */ 65 66 /* Internal state. */ 67 uint64_t next_pn[QUIC_PN_SPACE_NUM]; /* Next PN to use in given PN space. */ 68 OSSL_TIME last_tx_time; /* Last time a packet was generated, or 0. */ 69 70 size_t unvalidated_credit; /* Limit of data we can send until validated */ 71 72 /* Internal state - frame (re)generation flags. */ 73 unsigned int want_handshake_done : 1; 74 unsigned int want_max_data : 1; 75 unsigned int want_max_streams_bidi : 1; 76 unsigned int want_max_streams_uni : 1; 77 78 /* Internal state - frame (re)generation flags - per PN space. */ 79 unsigned int want_ack : QUIC_PN_SPACE_NUM; 80 unsigned int force_ack_eliciting : QUIC_PN_SPACE_NUM; 81 82 /* 83 * Internal state - connection close terminal state. 84 * Once this is set, it is not unset unlike other want_ flags - we keep 85 * sending it in every packet. 86 */ 87 unsigned int want_conn_close : 1; 88 89 /* Has the handshake been completed? */ 90 unsigned int handshake_complete : 1; 91 92 OSSL_QUIC_FRAME_CONN_CLOSE conn_close_frame; 93 94 /* 95 * Counts of the number of bytes received and sent while in the closing 96 * state. 97 */ 98 uint64_t closing_bytes_recv; 99 uint64_t closing_bytes_xmit; 100 101 /* Internal state - packet assembly. */ 102 struct txp_el { 103 unsigned char *scratch; /* scratch buffer for packet assembly */ 104 size_t scratch_len; /* number of bytes allocated for scratch */ 105 OSSL_QTX_IOVEC *iovec; /* scratch iovec array for use with QTX */ 106 size_t alloc_iovec; /* size of iovec array */ 107 } el[QUIC_ENC_LEVEL_NUM]; 108 109 /* Message callback related arguments */ 110 ossl_msg_cb msg_callback; 111 void *msg_callback_arg; 112 SSL *msg_callback_ssl; 113 114 /* Callbacks. */ 115 void (*ack_tx_cb)(const OSSL_QUIC_FRAME_ACK *ack, 116 uint32_t pn_space, 117 void *arg); 118 void *ack_tx_cb_arg; 119 }; 120 121 /* 122 * The TX helper records state used while generating frames into packets. It 123 * enables serialization into the packet to be done "transactionally" where 124 * serialization of a frame can be rolled back if it fails midway (e.g. if it 125 * does not fit). 126 */ 127 struct tx_helper { 128 OSSL_QUIC_TX_PACKETISER *txp; 129 /* 130 * The Maximum Packet Payload Length in bytes. This is the amount of 131 * space we have to generate frames into. 132 */ 133 size_t max_ppl; 134 /* 135 * Number of bytes we have generated so far. 136 */ 137 size_t bytes_appended; 138 /* 139 * Number of scratch bytes in txp->scratch we have used so far. Some iovecs 140 * will reference this scratch buffer. When we need to use more of it (e.g. 141 * when we need to put frame headers somewhere), we append to the scratch 142 * buffer, resizing if necessary, and increase this accordingly. 143 */ 144 size_t scratch_bytes; 145 /* 146 * Bytes reserved in the MaxPPL budget. We keep this number of bytes spare 147 * until reserve_allowed is set to 1. Currently this is always at most 1, as 148 * a PING frame takes up one byte and this mechanism is only used to ensure 149 * we can encode a PING frame if we have been asked to ensure a packet is 150 * ACK-eliciting and we are unusure if we are going to add any other 151 * ACK-eliciting frames before we reach our MaxPPL budget. 152 */ 153 size_t reserve; 154 /* 155 * Number of iovecs we have currently appended. This is the number of 156 * entries valid in txp->iovec. 157 */ 158 size_t num_iovec; 159 /* The EL this TX helper is being used for. */ 160 uint32_t enc_level; 161 /* 162 * Whether we are allowed to make use of the reserve bytes in our MaxPPL 163 * budget. This is used to ensure we have room to append a PING frame later 164 * if we need to. Once we know we will not need to append a PING frame, this 165 * is set to 1. 166 */ 167 unsigned int reserve_allowed : 1; 168 /* 169 * Set to 1 if we have appended a STREAM frame with an implicit length. If 170 * this happens we should never append another frame after that frame as it 171 * cannot be validly encoded. This is just a safety check. 172 */ 173 unsigned int done_implicit : 1; 174 struct { 175 /* 176 * The fields in this structure are valid if active is set, which means 177 * that a serialization transaction is currently in progress. 178 */ 179 unsigned char *data; 180 WPACKET wpkt; 181 unsigned int active : 1; 182 } txn; 183 }; 184 185 static void tx_helper_rollback(struct tx_helper *h); 186 static int txp_el_ensure_iovec(struct txp_el *el, size_t num); 187 188 /* Initialises the TX helper. */ 189 static int tx_helper_init(struct tx_helper *h, OSSL_QUIC_TX_PACKETISER *txp, 190 uint32_t enc_level, size_t max_ppl, size_t reserve) 191 { 192 if (reserve > max_ppl) 193 return 0; 194 195 h->txp = txp; 196 h->enc_level = enc_level; 197 h->max_ppl = max_ppl; 198 h->reserve = reserve; 199 h->num_iovec = 0; 200 h->bytes_appended = 0; 201 h->scratch_bytes = 0; 202 h->reserve_allowed = 0; 203 h->done_implicit = 0; 204 h->txn.data = NULL; 205 h->txn.active = 0; 206 207 if (max_ppl > h->txp->el[enc_level].scratch_len) { 208 unsigned char *scratch; 209 210 scratch = OPENSSL_realloc(h->txp->el[enc_level].scratch, max_ppl); 211 if (scratch == NULL) 212 return 0; 213 214 h->txp->el[enc_level].scratch = scratch; 215 h->txp->el[enc_level].scratch_len = max_ppl; 216 } 217 218 return 1; 219 } 220 221 static void tx_helper_cleanup(struct tx_helper *h) 222 { 223 if (h->txn.active) 224 tx_helper_rollback(h); 225 226 h->txp = NULL; 227 } 228 229 static void tx_helper_unrestrict(struct tx_helper *h) 230 { 231 h->reserve_allowed = 1; 232 } 233 234 /* 235 * Append an extent of memory to the iovec list. The memory must remain 236 * allocated until we finish generating the packet and call the QTX. 237 * 238 * In general, the buffers passed to this function will be from one of two 239 * ranges: 240 * 241 * - Application data contained in stream buffers managed elsewhere 242 * in the QUIC stack; or 243 * 244 * - Control frame data appended into txp->scratch using tx_helper_begin and 245 * tx_helper_commit. 246 * 247 */ 248 static int tx_helper_append_iovec(struct tx_helper *h, 249 const unsigned char *buf, 250 size_t buf_len) 251 { 252 struct txp_el *el = &h->txp->el[h->enc_level]; 253 254 if (buf_len == 0) 255 return 1; 256 257 if (!ossl_assert(!h->done_implicit)) 258 return 0; 259 260 if (!txp_el_ensure_iovec(el, h->num_iovec + 1)) 261 return 0; 262 263 el->iovec[h->num_iovec].buf = buf; 264 el->iovec[h->num_iovec].buf_len = buf_len; 265 266 ++h->num_iovec; 267 h->bytes_appended += buf_len; 268 return 1; 269 } 270 271 /* 272 * How many more bytes of space do we have left in our plaintext packet payload? 273 */ 274 static size_t tx_helper_get_space_left(struct tx_helper *h) 275 { 276 return h->max_ppl 277 - (h->reserve_allowed ? 0 : h->reserve) - h->bytes_appended; 278 } 279 280 /* 281 * Begin a control frame serialization transaction. This allows the 282 * serialization of the control frame to be backed out if it turns out it won't 283 * fit. Write the control frame to the returned WPACKET. Ensure you always 284 * call tx_helper_rollback or tx_helper_commit (or tx_helper_cleanup). Returns 285 * NULL on failure. 286 */ 287 static WPACKET *tx_helper_begin(struct tx_helper *h) 288 { 289 size_t space_left, len; 290 unsigned char *data; 291 struct txp_el *el = &h->txp->el[h->enc_level]; 292 293 if (!ossl_assert(!h->txn.active)) 294 return NULL; 295 296 if (!ossl_assert(!h->done_implicit)) 297 return NULL; 298 299 data = (unsigned char *)el->scratch + h->scratch_bytes; 300 len = el->scratch_len - h->scratch_bytes; 301 302 space_left = tx_helper_get_space_left(h); 303 if (!ossl_assert(space_left <= len)) 304 return NULL; 305 306 if (!WPACKET_init_static_len(&h->txn.wpkt, data, len, 0)) 307 return NULL; 308 309 if (!WPACKET_set_max_size(&h->txn.wpkt, space_left)) { 310 WPACKET_cleanup(&h->txn.wpkt); 311 return NULL; 312 } 313 314 h->txn.data = data; 315 h->txn.active = 1; 316 return &h->txn.wpkt; 317 } 318 319 static void tx_helper_end(struct tx_helper *h, int success) 320 { 321 if (success) 322 WPACKET_finish(&h->txn.wpkt); 323 else 324 WPACKET_cleanup(&h->txn.wpkt); 325 326 h->txn.active = 0; 327 h->txn.data = NULL; 328 } 329 330 /* Abort a control frame serialization transaction. */ 331 static void tx_helper_rollback(struct tx_helper *h) 332 { 333 if (!h->txn.active) 334 return; 335 336 tx_helper_end(h, 0); 337 } 338 339 /* Commit a control frame. */ 340 static int tx_helper_commit(struct tx_helper *h) 341 { 342 size_t l = 0; 343 344 if (!h->txn.active) 345 return 0; 346 347 if (!WPACKET_get_total_written(&h->txn.wpkt, &l)) { 348 tx_helper_end(h, 0); 349 return 0; 350 } 351 352 if (!tx_helper_append_iovec(h, h->txn.data, l)) { 353 tx_helper_end(h, 0); 354 return 0; 355 } 356 357 if (h->txp->msg_callback != NULL && l > 0) { 358 uint64_t ftype; 359 int ctype = SSL3_RT_QUIC_FRAME_FULL; 360 PACKET pkt; 361 362 if (!PACKET_buf_init(&pkt, h->txn.data, l) 363 || !ossl_quic_wire_peek_frame_header(&pkt, &ftype, NULL)) { 364 tx_helper_end(h, 0); 365 return 0; 366 } 367 368 if (ftype == OSSL_QUIC_FRAME_TYPE_PADDING) 369 ctype = SSL3_RT_QUIC_FRAME_PADDING; 370 else if (OSSL_QUIC_FRAME_TYPE_IS_STREAM(ftype) 371 || ftype == OSSL_QUIC_FRAME_TYPE_CRYPTO) 372 ctype = SSL3_RT_QUIC_FRAME_HEADER; 373 374 h->txp->msg_callback(1, OSSL_QUIC1_VERSION, ctype, h->txn.data, l, 375 h->txp->msg_callback_ssl, 376 h->txp->msg_callback_arg); 377 } 378 379 h->scratch_bytes += l; 380 tx_helper_end(h, 1); 381 return 1; 382 } 383 384 struct archetype_data { 385 unsigned int allow_ack : 1; 386 unsigned int allow_ping : 1; 387 unsigned int allow_crypto : 1; 388 unsigned int allow_handshake_done : 1; 389 unsigned int allow_path_challenge : 1; 390 unsigned int allow_path_response : 1; 391 unsigned int allow_new_conn_id : 1; 392 unsigned int allow_retire_conn_id : 1; 393 unsigned int allow_stream_rel : 1; 394 unsigned int allow_conn_fc : 1; 395 unsigned int allow_conn_close : 1; 396 unsigned int allow_cfq_other : 1; 397 unsigned int allow_new_token : 1; 398 unsigned int allow_force_ack_eliciting : 1; 399 unsigned int allow_padding : 1; 400 unsigned int require_ack_eliciting : 1; 401 unsigned int bypass_cc : 1; 402 }; 403 404 struct txp_pkt_geom { 405 size_t cmpl, cmppl, hwm, pkt_overhead; 406 uint32_t archetype; 407 struct archetype_data adata; 408 }; 409 410 struct txp_pkt { 411 struct tx_helper h; 412 int h_valid; 413 QUIC_TXPIM_PKT *tpkt; 414 QUIC_STREAM *stream_head; 415 QUIC_PKT_HDR phdr; 416 struct txp_pkt_geom geom; 417 int force_pad; 418 }; 419 420 static QUIC_SSTREAM *get_sstream_by_id(uint64_t stream_id, uint32_t pn_space, 421 void *arg); 422 static void on_regen_notify(uint64_t frame_type, uint64_t stream_id, 423 QUIC_TXPIM_PKT *pkt, void *arg); 424 static void on_confirm_notify(uint64_t frame_type, uint64_t stream_id, 425 QUIC_TXPIM_PKT *pkt, void *arg); 426 static void on_sstream_updated(uint64_t stream_id, void *arg); 427 static int sstream_is_pending(QUIC_SSTREAM *sstream); 428 static int txp_should_try_staging(OSSL_QUIC_TX_PACKETISER *txp, 429 uint32_t enc_level, 430 uint32_t archetype, 431 uint64_t cc_limit, 432 uint32_t *conn_close_enc_level); 433 static size_t txp_determine_pn_len(OSSL_QUIC_TX_PACKETISER *txp); 434 static int txp_determine_ppl_from_pl(OSSL_QUIC_TX_PACKETISER *txp, 435 size_t pl, 436 uint32_t enc_level, 437 size_t hdr_len, 438 size_t *r); 439 static size_t txp_get_mdpl(OSSL_QUIC_TX_PACKETISER *txp); 440 static int txp_generate_for_el(OSSL_QUIC_TX_PACKETISER *txp, 441 struct txp_pkt *pkt, 442 int chosen_for_conn_close); 443 static int txp_pkt_init(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp, 444 uint32_t enc_level, uint32_t archetype, 445 size_t running_total); 446 static void txp_pkt_cleanup(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp); 447 static int txp_pkt_postgen_update_pkt_overhead(struct txp_pkt *pkt, 448 OSSL_QUIC_TX_PACKETISER *txp); 449 static int txp_pkt_append_padding(struct txp_pkt *pkt, 450 OSSL_QUIC_TX_PACKETISER *txp, size_t num_bytes); 451 static int txp_pkt_commit(OSSL_QUIC_TX_PACKETISER *txp, struct txp_pkt *pkt, 452 uint32_t archetype, int *txpim_pkt_reffed); 453 static uint32_t txp_determine_archetype(OSSL_QUIC_TX_PACKETISER *txp, 454 uint64_t cc_limit); 455 456 /** 457 * Sets the validated state of a QUIC TX packetiser. 458 * 459 * This function marks the provided QUIC TX packetiser as having its credit 460 * fully validated by setting its `unvalidated_credit` field to `SIZE_MAX`. 461 * 462 * @param txp A pointer to the OSSL_QUIC_TX_PACKETISER structure to update. 463 */ 464 void ossl_quic_tx_packetiser_set_validated(OSSL_QUIC_TX_PACKETISER *txp) 465 { 466 txp->unvalidated_credit = SIZE_MAX; 467 return; 468 } 469 470 /** 471 * Adds unvalidated credit to a QUIC TX packetiser. 472 * 473 * This function increases the unvalidated credit of the provided QUIC TX 474 * packetiser. If the current unvalidated credit is not `SIZE_MAX`, the 475 * function adds three times the specified `credit` value, ensuring it does 476 * not exceed the maximum allowable value (`SIZE_MAX - 1`). If the addition 477 * would cause an overflow, the unvalidated credit is capped at 478 * `SIZE_MAX - 1`. If the current unvalidated credit is already `SIZE_MAX`, 479 * the function does nothing. 480 * 481 * @param txp A pointer to the OSSL_QUIC_TX_PACKETISER structure to update. 482 * @param credit The amount of credit to add, multiplied by 3. 483 */ 484 void ossl_quic_tx_packetiser_add_unvalidated_credit(OSSL_QUIC_TX_PACKETISER *txp, 485 size_t credit) 486 { 487 if (txp->unvalidated_credit != SIZE_MAX) { 488 if ((SIZE_MAX - txp->unvalidated_credit) > (credit * 3)) 489 txp->unvalidated_credit += credit * 3; 490 else 491 txp->unvalidated_credit = SIZE_MAX - 1; 492 } 493 494 return; 495 } 496 497 /** 498 * Consumes unvalidated credit from a QUIC TX packetiser. 499 * 500 * This function decreases the unvalidated credit of the specified 501 * QUIC TX packetiser by the given `credit` value. If the unvalidated credit 502 * is set to `SIZE_MAX`, the function does nothing, as `SIZE_MAX` represents 503 * an unlimited credit state. 504 * 505 * @param txp A pointer to the OSSL_QUIC_TX_PACKETISER structure to update. 506 * @param credit The amount of credit to consume. 507 */ 508 void ossl_quic_tx_packetiser_consume_unvalidated_credit(OSSL_QUIC_TX_PACKETISER *txp, 509 size_t credit) 510 { 511 if (txp->unvalidated_credit != SIZE_MAX) { 512 if (txp->unvalidated_credit < credit) 513 txp->unvalidated_credit = 0; 514 else 515 txp->unvalidated_credit -= credit; 516 } 517 } 518 519 /** 520 * Checks if the QUIC TX packetiser has sufficient unvalidated credit. 521 * 522 * This function determines whether the unvalidated credit of the specified 523 * QUIC TX packetiser exceeds the required credit value (`req_credit`). 524 * If the unvalidated credit is greater than `req_credit`, the function 525 * returns 1 (true); otherwise, it returns 0 (false). 526 * 527 * @param txp A pointer to the OSSL_QUIC_TX_PACKETISER structure to check. 528 * @param req_credit The required credit value to compare against. 529 * 530 * @return 1 if the unvalidated credit exceeds `req_credit`, 0 otherwise. 531 */ 532 int ossl_quic_tx_packetiser_check_unvalidated_credit(OSSL_QUIC_TX_PACKETISER *txp, 533 size_t req_credit) 534 { 535 return (txp->unvalidated_credit > req_credit); 536 } 537 538 OSSL_QUIC_TX_PACKETISER *ossl_quic_tx_packetiser_new(const OSSL_QUIC_TX_PACKETISER_ARGS *args) 539 { 540 OSSL_QUIC_TX_PACKETISER *txp; 541 542 if (args == NULL 543 || args->qtx == NULL 544 || args->txpim == NULL 545 || args->cfq == NULL 546 || args->ackm == NULL 547 || args->qsm == NULL 548 || args->conn_txfc == NULL 549 || args->conn_rxfc == NULL 550 || args->max_streams_bidi_rxfc == NULL 551 || args->max_streams_uni_rxfc == NULL 552 || args->protocol_version == 0) { 553 ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER); 554 return NULL; 555 } 556 557 txp = OPENSSL_zalloc(sizeof(*txp)); 558 if (txp == NULL) 559 return NULL; 560 561 txp->args = *args; 562 txp->last_tx_time = ossl_time_zero(); 563 564 if (!ossl_quic_fifd_init(&txp->fifd, 565 txp->args.cfq, txp->args.ackm, txp->args.txpim, 566 get_sstream_by_id, txp, 567 on_regen_notify, txp, 568 on_confirm_notify, txp, 569 on_sstream_updated, txp, 570 args->get_qlog_cb, 571 args->get_qlog_cb_arg)) { 572 OPENSSL_free(txp); 573 return NULL; 574 } 575 576 return txp; 577 } 578 579 void ossl_quic_tx_packetiser_free(OSSL_QUIC_TX_PACKETISER *txp) 580 { 581 uint32_t enc_level; 582 583 if (txp == NULL) 584 return; 585 586 ossl_quic_tx_packetiser_set_initial_token(txp, NULL, 0, NULL, NULL); 587 ossl_quic_fifd_cleanup(&txp->fifd); 588 OPENSSL_free(txp->conn_close_frame.reason); 589 590 for (enc_level = QUIC_ENC_LEVEL_INITIAL; 591 enc_level < QUIC_ENC_LEVEL_NUM; 592 ++enc_level) { 593 OPENSSL_free(txp->el[enc_level].iovec); 594 OPENSSL_free(txp->el[enc_level].scratch); 595 } 596 597 OPENSSL_free(txp); 598 } 599 600 /* 601 * Determine if an Initial packet token length is reasonable based on the 602 * current MDPL, returning 1 if it is OK. 603 * 604 * The real PMTU to the peer could differ from our (pessimistic) understanding 605 * of the PMTU, therefore it is possible we could receive an Initial token from 606 * a server in a Retry packet which is bigger than the MDPL. In this case it is 607 * impossible for us ever to make forward progress and we need to error out 608 * and fail the connection attempt. 609 * 610 * The specific boundary condition is complex: for example, after the size of 611 * the Initial token, there are the Initial packet header overheads and then 612 * encryption/AEAD tag overheads. After that, the minimum room for frame data in 613 * order to guarantee forward progress must be guaranteed. For example, a crypto 614 * stream needs to always be able to serialize at least one byte in a CRYPTO 615 * frame in order to make forward progress. Because the offset field of a CRYPTO 616 * frame uses a variable-length integer, the number of bytes needed to ensure 617 * this also varies. 618 * 619 * Rather than trying to get this boundary condition check actually right, 620 * require a reasonable amount of slack to avoid pathological behaviours. (After 621 * all, transmitting a CRYPTO stream one byte at a time is probably not 622 * desirable anyway.) 623 * 624 * We choose 160 bytes as the required margin, which is double the rough 625 * estimation of the minimum we would require to guarantee forward progress 626 * under worst case packet overheads. 627 */ 628 #define TXP_REQUIRED_TOKEN_MARGIN 160 629 630 static int txp_check_token_len(size_t token_len, size_t mdpl) 631 { 632 if (token_len == 0) 633 return 1; 634 635 if (token_len >= mdpl) 636 return 0; 637 638 if (TXP_REQUIRED_TOKEN_MARGIN >= mdpl) 639 /* (should not be possible because MDPL must be at least 1200) */ 640 return 0; 641 642 if (token_len > mdpl - TXP_REQUIRED_TOKEN_MARGIN) 643 return 0; 644 645 return 1; 646 } 647 648 int ossl_quic_tx_packetiser_set_initial_token(OSSL_QUIC_TX_PACKETISER *txp, 649 const unsigned char *token, 650 size_t token_len, 651 ossl_quic_initial_token_free_fn *free_cb, 652 void *free_cb_arg) 653 { 654 if (!txp_check_token_len(token_len, txp_get_mdpl(txp))) 655 return 0; 656 657 if (txp->initial_token != NULL && txp->initial_token_free_cb != NULL) 658 txp->initial_token_free_cb(txp->initial_token, txp->initial_token_len, 659 txp->initial_token_free_cb_arg); 660 661 txp->initial_token = token; 662 txp->initial_token_len = token_len; 663 txp->initial_token_free_cb = free_cb; 664 txp->initial_token_free_cb_arg = free_cb_arg; 665 return 1; 666 } 667 668 int ossl_quic_tx_packetiser_set_protocol_version(OSSL_QUIC_TX_PACKETISER *txp, 669 uint32_t protocol_version) 670 { 671 txp->args.protocol_version = protocol_version; 672 return 1; 673 } 674 675 int ossl_quic_tx_packetiser_set_cur_dcid(OSSL_QUIC_TX_PACKETISER *txp, 676 const QUIC_CONN_ID *dcid) 677 { 678 if (dcid == NULL) { 679 ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER); 680 return 0; 681 } 682 683 txp->args.cur_dcid = *dcid; 684 return 1; 685 } 686 687 int ossl_quic_tx_packetiser_set_cur_scid(OSSL_QUIC_TX_PACKETISER *txp, 688 const QUIC_CONN_ID *scid) 689 { 690 if (scid == NULL) { 691 ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER); 692 return 0; 693 } 694 695 txp->args.cur_scid = *scid; 696 return 1; 697 } 698 699 /* Change the destination L4 address the TXP uses to send datagrams. */ 700 int ossl_quic_tx_packetiser_set_peer(OSSL_QUIC_TX_PACKETISER *txp, 701 const BIO_ADDR *peer) 702 { 703 if (peer == NULL) { 704 BIO_ADDR_clear(&txp->args.peer); 705 return 1; 706 } 707 708 return BIO_ADDR_copy(&txp->args.peer, peer); 709 } 710 711 void ossl_quic_tx_packetiser_set_ack_tx_cb(OSSL_QUIC_TX_PACKETISER *txp, 712 void (*cb)(const OSSL_QUIC_FRAME_ACK *ack, 713 uint32_t pn_space, 714 void *arg), 715 void *cb_arg) 716 { 717 txp->ack_tx_cb = cb; 718 txp->ack_tx_cb_arg = cb_arg; 719 } 720 721 void ossl_quic_tx_packetiser_set_qlog_cb(OSSL_QUIC_TX_PACKETISER *txp, 722 QLOG *(*get_qlog_cb)(void *arg), 723 void *get_qlog_cb_arg) 724 { 725 ossl_quic_fifd_set_qlog_cb(&txp->fifd, get_qlog_cb, get_qlog_cb_arg); 726 727 } 728 729 int ossl_quic_tx_packetiser_discard_enc_level(OSSL_QUIC_TX_PACKETISER *txp, 730 uint32_t enc_level) 731 { 732 if (enc_level >= QUIC_ENC_LEVEL_NUM) { 733 ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT); 734 return 0; 735 } 736 737 if (enc_level != QUIC_ENC_LEVEL_0RTT) 738 txp->args.crypto[ossl_quic_enc_level_to_pn_space(enc_level)] = NULL; 739 740 return 1; 741 } 742 743 void ossl_quic_tx_packetiser_notify_handshake_complete(OSSL_QUIC_TX_PACKETISER *txp) 744 { 745 txp->handshake_complete = 1; 746 } 747 748 void ossl_quic_tx_packetiser_schedule_handshake_done(OSSL_QUIC_TX_PACKETISER *txp) 749 { 750 txp->want_handshake_done = 1; 751 } 752 753 void ossl_quic_tx_packetiser_schedule_ack_eliciting(OSSL_QUIC_TX_PACKETISER *txp, 754 uint32_t pn_space) 755 { 756 txp->force_ack_eliciting |= (1UL << pn_space); 757 } 758 759 void ossl_quic_tx_packetiser_schedule_ack(OSSL_QUIC_TX_PACKETISER *txp, 760 uint32_t pn_space) 761 { 762 txp->want_ack |= (1UL << pn_space); 763 } 764 765 #define TXP_ERR_INTERNAL 0 /* Internal (e.g. alloc) error */ 766 #define TXP_ERR_SUCCESS 1 /* Success */ 767 #define TXP_ERR_SPACE 2 /* Not enough room for another packet */ 768 #define TXP_ERR_INPUT 3 /* Invalid/malformed input */ 769 770 /* 771 * Generates a datagram by polling the various ELs to determine if they want to 772 * generate any frames, and generating a datagram which coalesces packets for 773 * any ELs which do. 774 */ 775 int ossl_quic_tx_packetiser_generate(OSSL_QUIC_TX_PACKETISER *txp, 776 QUIC_TXP_STATUS *status) 777 { 778 /* 779 * Called to generate one or more datagrams, each containing one or more 780 * packets. 781 * 782 * There are some tricky things to note here: 783 * 784 * - The TXP is only concerned with generating encrypted packets; 785 * other packets use a different path. 786 * 787 * - Any datagram containing an Initial packet must have a payload length 788 * (DPL) of at least 1200 bytes. This padding need not necessarily be 789 * found in the Initial packet. 790 * 791 * - It is desirable to be able to coalesce an Initial packet 792 * with a Handshake packet. Since, before generating the Handshake 793 * packet, we do not know how long it will be, we cannot know the 794 * correct amount of padding to ensure a DPL of at least 1200 bytes. 795 * Thus this padding must added to the Handshake packet (or whatever 796 * packet is the last in the datagram). 797 * 798 * - However, at the time that we generate the Initial packet, 799 * we do not actually know for sure that we will be followed 800 * in the datagram by another packet. For example, suppose we have 801 * some queued data (e.g. crypto stream data for the HANDSHAKE EL) 802 * it looks like we will want to send on the HANDSHAKE EL. 803 * We could assume padding will be placed in the Handshake packet 804 * subsequently and avoid adding any padding to the Initial packet 805 * (which would leave no room for the Handshake packet in the 806 * datagram). 807 * 808 * However, this is not actually a safe assumption. Suppose that we 809 * are using a link with a MDPL of 1200 bytes, the minimum allowed by 810 * QUIC. Suppose that the Initial packet consumes 1195 bytes in total. 811 * Since it is not possible to fit a Handshake packet in just 5 bytes, 812 * upon trying to add a Handshake packet after generating the Initial 813 * packet, we will discover we have no room to fit it! This is not a 814 * problem in itself as another datagram can be sent subsequently, but 815 * it is a problem because we were counting to use that packet to hold 816 * the essential padding. But if we have already finished encrypting 817 * the Initial packet, we cannot go and add padding to it anymore. 818 * This leaves us stuck. 819 * 820 * Because of this, we have to plan multiple packets simultaneously, such 821 * that we can start generating a Handshake (or 0-RTT or 1-RTT, or so on) 822 * packet while still having the option to go back and add padding to the 823 * Initial packet if it turns out to be needed. 824 * 825 * Trying to predict ahead of time (e.g. during Initial packet generation) 826 * whether we will successfully generate a subsequent packet is fraught with 827 * error as it relies on a large number of variables: 828 * 829 * - Do we have room to fit a packet header? (Consider that due to 830 * variable-length integer encoding this is highly variable and can even 831 * depend on payload length due to a variable-length Length field.) 832 * 833 * - Can we fit even a single one of the frames we want to put in this 834 * packet in the packet? (Each frame type has a bespoke encoding. While 835 * our encodings of some frame types are adaptive based on the available 836 * room - e.g. STREAM frames - ultimately all frame types have some 837 * absolute minimum number of bytes to be successfully encoded. For 838 * example, if after an Initial packet there is enough room to encode 839 * only one byte of frame data, it is quite likely we can't send any of 840 * the frames we wanted to send.) While this is not strictly a problem 841 * because we could just fill the packet with padding frames, this is a 842 * pointless packet and is wasteful. 843 * 844 * Thus we adopt a multi-phase architecture: 845 * 846 * 1. Archetype Selection: Determine desired packet archetype. 847 * 848 * 2. Packet Staging: Generation of packet information and packet payload 849 * data (frame data) into staging areas. 850 * 851 * 3. Packet Adjustment: Adjustment of staged packets, adding padding to 852 * the staged packets if needed. 853 * 854 * 4. Commit: The packets are sent to the QTX and recorded as having been 855 * sent to the FIFM. 856 * 857 */ 858 int res = 0, rc; 859 uint32_t archetype, enc_level; 860 uint32_t conn_close_enc_level = QUIC_ENC_LEVEL_NUM; 861 struct txp_pkt pkt[QUIC_ENC_LEVEL_NUM]; 862 size_t pkts_done = 0; 863 uint64_t cc_limit = txp->args.cc_method->get_tx_allowance(txp->args.cc_data); 864 int need_padding = 0, txpim_pkt_reffed; 865 866 memset(status, 0, sizeof(*status)); 867 868 for (enc_level = QUIC_ENC_LEVEL_INITIAL; 869 enc_level < QUIC_ENC_LEVEL_NUM; 870 ++enc_level) 871 pkt[enc_level].h_valid = 0; 872 873 874 /* 875 * Should not be needed, but a sanity check in case anyone else has been 876 * using the QTX. 877 */ 878 ossl_qtx_finish_dgram(txp->args.qtx); 879 880 /* 1. Archetype Selection */ 881 archetype = txp_determine_archetype(txp, cc_limit); 882 883 /* 2. Packet Staging */ 884 for (enc_level = QUIC_ENC_LEVEL_INITIAL; 885 enc_level < QUIC_ENC_LEVEL_NUM; 886 ++enc_level) { 887 size_t running_total = (enc_level > QUIC_ENC_LEVEL_INITIAL) 888 ? pkt[enc_level - 1].geom.hwm : 0; 889 890 pkt[enc_level].geom.hwm = running_total; 891 892 if (!txp_should_try_staging(txp, enc_level, archetype, cc_limit, 893 &conn_close_enc_level)) 894 continue; 895 896 if (!txp_pkt_init(&pkt[enc_level], txp, enc_level, archetype, 897 running_total)) 898 /* 899 * If this fails this is not a fatal error - it means the geometry 900 * planning determined there was not enough space for another 901 * packet. So just proceed with what we've already planned for. 902 */ 903 break; 904 905 rc = txp_generate_for_el(txp, &pkt[enc_level], 906 conn_close_enc_level == enc_level); 907 if (rc != TXP_ERR_SUCCESS) 908 goto out; 909 910 if (pkt[enc_level].force_pad) 911 /* 912 * txp_generate_for_el emitted a frame which forces packet padding. 913 */ 914 need_padding = 1; 915 916 pkt[enc_level].geom.hwm = running_total 917 + pkt[enc_level].h.bytes_appended 918 + pkt[enc_level].geom.pkt_overhead; 919 } 920 921 /* 3. Packet Adjustment */ 922 if (pkt[QUIC_ENC_LEVEL_INITIAL].h_valid 923 && pkt[QUIC_ENC_LEVEL_INITIAL].h.bytes_appended > 0) 924 /* 925 * We have an Initial packet in this datagram, so we need to make sure 926 * the total size of the datagram is adequate. 927 */ 928 need_padding = 1; 929 930 if (need_padding) { 931 size_t total_dgram_size = 0; 932 const size_t min_dpl = QUIC_MIN_INITIAL_DGRAM_LEN; 933 uint32_t pad_el = QUIC_ENC_LEVEL_NUM; 934 935 for (enc_level = QUIC_ENC_LEVEL_INITIAL; 936 enc_level < QUIC_ENC_LEVEL_NUM; 937 ++enc_level) 938 if (pkt[enc_level].h_valid && pkt[enc_level].h.bytes_appended > 0) { 939 if (pad_el == QUIC_ENC_LEVEL_NUM 940 /* 941 * We might not be able to add padding, for example if we 942 * are using the ACK_ONLY archetype. 943 */ 944 && pkt[enc_level].geom.adata.allow_padding 945 && !pkt[enc_level].h.done_implicit) 946 pad_el = enc_level; 947 948 txp_pkt_postgen_update_pkt_overhead(&pkt[enc_level], txp); 949 total_dgram_size += pkt[enc_level].geom.pkt_overhead 950 + pkt[enc_level].h.bytes_appended; 951 } 952 953 if (pad_el != QUIC_ENC_LEVEL_NUM && total_dgram_size < min_dpl) { 954 size_t deficit = min_dpl - total_dgram_size; 955 956 if (!txp_pkt_append_padding(&pkt[pad_el], txp, deficit)) 957 goto out; 958 959 total_dgram_size += deficit; 960 961 /* 962 * Padding frames make a packet ineligible for being a non-inflight 963 * packet. 964 */ 965 pkt[pad_el].tpkt->ackm_pkt.is_inflight = 1; 966 } 967 968 /* 969 * If we have failed to make a datagram of adequate size, for example 970 * because we have a padding requirement but are using the ACK_ONLY 971 * archetype (because we are CC limited), which precludes us from 972 * sending padding, give up on generating the datagram - there is 973 * nothing we can do. 974 */ 975 if (total_dgram_size < min_dpl) { 976 res = 1; 977 goto out; 978 } 979 } 980 981 /* 4. Commit */ 982 for (enc_level = QUIC_ENC_LEVEL_INITIAL; 983 enc_level < QUIC_ENC_LEVEL_NUM; 984 ++enc_level) { 985 986 if (!pkt[enc_level].h_valid) 987 /* Did not attempt to generate a packet for this EL. */ 988 continue; 989 990 if (pkt[enc_level].h.bytes_appended == 0) 991 /* Nothing was generated for this EL, so skip. */ 992 continue; 993 994 if (!ossl_quic_tx_packetiser_check_unvalidated_credit(txp, 995 pkt[enc_level].h.bytes_appended)) { 996 res = TXP_ERR_SPACE; 997 goto out; 998 } 999 ossl_quic_tx_packetiser_consume_unvalidated_credit(txp, pkt[enc_level].h.bytes_appended); 1000 1001 rc = txp_pkt_commit(txp, &pkt[enc_level], archetype, 1002 &txpim_pkt_reffed); 1003 if (rc) { 1004 status->sent_ack_eliciting 1005 = status->sent_ack_eliciting 1006 || pkt[enc_level].tpkt->ackm_pkt.is_ack_eliciting; 1007 1008 if (enc_level == QUIC_ENC_LEVEL_HANDSHAKE) 1009 status->sent_handshake 1010 = (pkt[enc_level].h_valid 1011 && pkt[enc_level].h.bytes_appended > 0); 1012 } 1013 1014 if (txpim_pkt_reffed) 1015 pkt[enc_level].tpkt = NULL; /* don't free */ 1016 1017 if (!rc) 1018 goto out; 1019 1020 ++pkts_done; 1021 1022 } 1023 1024 /* Flush & Cleanup */ 1025 res = 1; 1026 out: 1027 ossl_qtx_finish_dgram(txp->args.qtx); 1028 1029 for (enc_level = QUIC_ENC_LEVEL_INITIAL; 1030 enc_level < QUIC_ENC_LEVEL_NUM; 1031 ++enc_level) 1032 txp_pkt_cleanup(&pkt[enc_level], txp); 1033 1034 status->sent_pkt = pkts_done; 1035 1036 return res; 1037 } 1038 1039 static const struct archetype_data archetypes[QUIC_ENC_LEVEL_NUM][TX_PACKETISER_ARCHETYPE_NUM] = { 1040 /* EL 0(INITIAL) */ 1041 { 1042 /* EL 0(INITIAL) - Archetype 0(NORMAL) */ 1043 { 1044 /*allow_ack =*/ 1, 1045 /*allow_ping =*/ 1, 1046 /*allow_crypto =*/ 1, 1047 /*allow_handshake_done =*/ 0, 1048 /*allow_path_challenge =*/ 0, 1049 /*allow_path_response =*/ 0, 1050 /*allow_new_conn_id =*/ 0, 1051 /*allow_retire_conn_id =*/ 0, 1052 /*allow_stream_rel =*/ 0, 1053 /*allow_conn_fc =*/ 0, 1054 /*allow_conn_close =*/ 1, 1055 /*allow_cfq_other =*/ 0, 1056 /*allow_new_token =*/ 0, 1057 /*allow_force_ack_eliciting =*/ 1, 1058 /*allow_padding =*/ 1, 1059 /*require_ack_eliciting =*/ 0, 1060 /*bypass_cc =*/ 0, 1061 }, 1062 /* EL 0(INITIAL) - Archetype 1(PROBE) */ 1063 { 1064 /*allow_ack =*/ 1, 1065 /*allow_ping =*/ 1, 1066 /*allow_crypto =*/ 1, 1067 /*allow_handshake_done =*/ 0, 1068 /*allow_path_challenge =*/ 0, 1069 /*allow_path_response =*/ 0, 1070 /*allow_new_conn_id =*/ 0, 1071 /*allow_retire_conn_id =*/ 0, 1072 /*allow_stream_rel =*/ 0, 1073 /*allow_conn_fc =*/ 0, 1074 /*allow_conn_close =*/ 1, 1075 /*allow_cfq_other =*/ 0, 1076 /*allow_new_token =*/ 0, 1077 /*allow_force_ack_eliciting =*/ 1, 1078 /*allow_padding =*/ 1, 1079 /*require_ack_eliciting =*/ 1, 1080 /*bypass_cc =*/ 1, 1081 }, 1082 /* EL 0(INITIAL) - Archetype 2(ACK_ONLY) */ 1083 { 1084 /*allow_ack =*/ 1, 1085 /*allow_ping =*/ 0, 1086 /*allow_crypto =*/ 0, 1087 /*allow_handshake_done =*/ 0, 1088 /*allow_path_challenge =*/ 0, 1089 /*allow_path_response =*/ 0, 1090 /*allow_new_conn_id =*/ 0, 1091 /*allow_retire_conn_id =*/ 0, 1092 /*allow_stream_rel =*/ 0, 1093 /*allow_conn_fc =*/ 0, 1094 /*allow_conn_close =*/ 0, 1095 /*allow_cfq_other =*/ 0, 1096 /*allow_new_token =*/ 0, 1097 /*allow_force_ack_eliciting =*/ 1, 1098 /*allow_padding =*/ 0, 1099 /*require_ack_eliciting =*/ 0, 1100 /*bypass_cc =*/ 1, 1101 }, 1102 }, 1103 /* EL 1(0RTT) */ 1104 { 1105 /* EL 1(0RTT) - Archetype 0(NORMAL) */ 1106 { 1107 /*allow_ack =*/ 0, 1108 /*allow_ping =*/ 1, 1109 /*allow_crypto =*/ 0, 1110 /*allow_handshake_done =*/ 0, 1111 /*allow_path_challenge =*/ 0, 1112 /*allow_path_response =*/ 0, 1113 /*allow_new_conn_id =*/ 1, 1114 /*allow_retire_conn_id =*/ 1, 1115 /*allow_stream_rel =*/ 1, 1116 /*allow_conn_fc =*/ 1, 1117 /*allow_conn_close =*/ 1, 1118 /*allow_cfq_other =*/ 0, 1119 /*allow_new_token =*/ 0, 1120 /*allow_force_ack_eliciting =*/ 0, 1121 /*allow_padding =*/ 1, 1122 /*require_ack_eliciting =*/ 0, 1123 /*bypass_cc =*/ 0, 1124 }, 1125 /* EL 1(0RTT) - Archetype 1(PROBE) */ 1126 { 1127 /*allow_ack =*/ 0, 1128 /*allow_ping =*/ 1, 1129 /*allow_crypto =*/ 0, 1130 /*allow_handshake_done =*/ 0, 1131 /*allow_path_challenge =*/ 0, 1132 /*allow_path_response =*/ 0, 1133 /*allow_new_conn_id =*/ 1, 1134 /*allow_retire_conn_id =*/ 1, 1135 /*allow_stream_rel =*/ 1, 1136 /*allow_conn_fc =*/ 1, 1137 /*allow_conn_close =*/ 1, 1138 /*allow_cfq_other =*/ 0, 1139 /*allow_new_token =*/ 0, 1140 /*allow_force_ack_eliciting =*/ 0, 1141 /*allow_padding =*/ 1, 1142 /*require_ack_eliciting =*/ 1, 1143 /*bypass_cc =*/ 1, 1144 }, 1145 /* EL 1(0RTT) - Archetype 2(ACK_ONLY) */ 1146 { 1147 /*allow_ack =*/ 0, 1148 /*allow_ping =*/ 0, 1149 /*allow_crypto =*/ 0, 1150 /*allow_handshake_done =*/ 0, 1151 /*allow_path_challenge =*/ 0, 1152 /*allow_path_response =*/ 0, 1153 /*allow_new_conn_id =*/ 0, 1154 /*allow_retire_conn_id =*/ 0, 1155 /*allow_stream_rel =*/ 0, 1156 /*allow_conn_fc =*/ 0, 1157 /*allow_conn_close =*/ 0, 1158 /*allow_cfq_other =*/ 0, 1159 /*allow_new_token =*/ 0, 1160 /*allow_force_ack_eliciting =*/ 0, 1161 /*allow_padding =*/ 0, 1162 /*require_ack_eliciting =*/ 0, 1163 /*bypass_cc =*/ 1, 1164 }, 1165 }, 1166 /* EL (HANDSHAKE) */ 1167 { 1168 /* EL 2(HANDSHAKE) - Archetype 0(NORMAL) */ 1169 { 1170 /*allow_ack =*/ 1, 1171 /*allow_ping =*/ 1, 1172 /*allow_crypto =*/ 1, 1173 /*allow_handshake_done =*/ 0, 1174 /*allow_path_challenge =*/ 0, 1175 /*allow_path_response =*/ 0, 1176 /*allow_new_conn_id =*/ 0, 1177 /*allow_retire_conn_id =*/ 0, 1178 /*allow_stream_rel =*/ 0, 1179 /*allow_conn_fc =*/ 0, 1180 /*allow_conn_close =*/ 1, 1181 /*allow_cfq_other =*/ 0, 1182 /*allow_new_token =*/ 0, 1183 /*allow_force_ack_eliciting =*/ 1, 1184 /*allow_padding =*/ 1, 1185 /*require_ack_eliciting =*/ 0, 1186 /*bypass_cc =*/ 0, 1187 }, 1188 /* EL 2(HANDSHAKE) - Archetype 1(PROBE) */ 1189 { 1190 /*allow_ack =*/ 1, 1191 /*allow_ping =*/ 1, 1192 /*allow_crypto =*/ 1, 1193 /*allow_handshake_done =*/ 0, 1194 /*allow_path_challenge =*/ 0, 1195 /*allow_path_response =*/ 0, 1196 /*allow_new_conn_id =*/ 0, 1197 /*allow_retire_conn_id =*/ 0, 1198 /*allow_stream_rel =*/ 0, 1199 /*allow_conn_fc =*/ 0, 1200 /*allow_conn_close =*/ 1, 1201 /*allow_cfq_other =*/ 0, 1202 /*allow_new_token =*/ 0, 1203 /*allow_force_ack_eliciting =*/ 1, 1204 /*allow_padding =*/ 1, 1205 /*require_ack_eliciting =*/ 1, 1206 /*bypass_cc =*/ 1, 1207 }, 1208 /* EL 2(HANDSHAKE) - Archetype 2(ACK_ONLY) */ 1209 { 1210 /*allow_ack =*/ 1, 1211 /*allow_ping =*/ 0, 1212 /*allow_crypto =*/ 0, 1213 /*allow_handshake_done =*/ 0, 1214 /*allow_path_challenge =*/ 0, 1215 /*allow_path_response =*/ 0, 1216 /*allow_new_conn_id =*/ 0, 1217 /*allow_retire_conn_id =*/ 0, 1218 /*allow_stream_rel =*/ 0, 1219 /*allow_conn_fc =*/ 0, 1220 /*allow_conn_close =*/ 0, 1221 /*allow_cfq_other =*/ 0, 1222 /*allow_new_token =*/ 0, 1223 /*allow_force_ack_eliciting =*/ 1, 1224 /*allow_padding =*/ 0, 1225 /*require_ack_eliciting =*/ 0, 1226 /*bypass_cc =*/ 1, 1227 }, 1228 }, 1229 /* EL 3(1RTT) */ 1230 { 1231 /* EL 3(1RTT) - Archetype 0(NORMAL) */ 1232 { 1233 /*allow_ack =*/ 1, 1234 /*allow_ping =*/ 1, 1235 /*allow_crypto =*/ 1, 1236 /*allow_handshake_done =*/ 1, 1237 /*allow_path_challenge =*/ 0, 1238 /*allow_path_response =*/ 1, 1239 /*allow_new_conn_id =*/ 1, 1240 /*allow_retire_conn_id =*/ 1, 1241 /*allow_stream_rel =*/ 1, 1242 /*allow_conn_fc =*/ 1, 1243 /*allow_conn_close =*/ 1, 1244 /*allow_cfq_other =*/ 1, 1245 /*allow_new_token =*/ 1, 1246 /*allow_force_ack_eliciting =*/ 1, 1247 /*allow_padding =*/ 1, 1248 /*require_ack_eliciting =*/ 0, 1249 /*bypass_cc =*/ 0, 1250 }, 1251 /* EL 3(1RTT) - Archetype 1(PROBE) */ 1252 { 1253 /*allow_ack =*/ 1, 1254 /*allow_ping =*/ 1, 1255 /*allow_crypto =*/ 1, 1256 /*allow_handshake_done =*/ 1, 1257 /*allow_path_challenge =*/ 0, 1258 /*allow_path_response =*/ 1, 1259 /*allow_new_conn_id =*/ 1, 1260 /*allow_retire_conn_id =*/ 1, 1261 /*allow_stream_rel =*/ 1, 1262 /*allow_conn_fc =*/ 1, 1263 /*allow_conn_close =*/ 1, 1264 /*allow_cfq_other =*/ 1, 1265 /*allow_new_token =*/ 1, 1266 /*allow_force_ack_eliciting =*/ 1, 1267 /*allow_padding =*/ 1, 1268 /*require_ack_eliciting =*/ 1, 1269 /*bypass_cc =*/ 1, 1270 }, 1271 /* EL 3(1RTT) - Archetype 2(ACK_ONLY) */ 1272 { 1273 /*allow_ack =*/ 1, 1274 /*allow_ping =*/ 0, 1275 /*allow_crypto =*/ 0, 1276 /*allow_handshake_done =*/ 0, 1277 /*allow_path_challenge =*/ 0, 1278 /*allow_path_response =*/ 0, 1279 /*allow_new_conn_id =*/ 0, 1280 /*allow_retire_conn_id =*/ 0, 1281 /*allow_stream_rel =*/ 0, 1282 /*allow_conn_fc =*/ 0, 1283 /*allow_conn_close =*/ 0, 1284 /*allow_cfq_other =*/ 0, 1285 /*allow_new_token =*/ 0, 1286 /*allow_force_ack_eliciting =*/ 1, 1287 /*allow_padding =*/ 0, 1288 /*require_ack_eliciting =*/ 0, 1289 /*bypass_cc =*/ 1, 1290 } 1291 } 1292 }; 1293 1294 static int txp_get_archetype_data(uint32_t enc_level, 1295 uint32_t archetype, 1296 struct archetype_data *a) 1297 { 1298 if (enc_level >= QUIC_ENC_LEVEL_NUM 1299 || archetype >= TX_PACKETISER_ARCHETYPE_NUM) 1300 return 0; 1301 1302 /* No need to avoid copying this as it should not exceed one int in size. */ 1303 *a = archetypes[enc_level][archetype]; 1304 return 1; 1305 } 1306 1307 static int txp_determine_geometry(OSSL_QUIC_TX_PACKETISER *txp, 1308 uint32_t archetype, 1309 uint32_t enc_level, 1310 size_t running_total, 1311 QUIC_PKT_HDR *phdr, 1312 struct txp_pkt_geom *geom) 1313 { 1314 size_t mdpl, cmpl, hdr_len; 1315 1316 /* Get information about packet archetype. */ 1317 if (!txp_get_archetype_data(enc_level, archetype, &geom->adata)) 1318 return 0; 1319 1320 /* Assemble packet header. */ 1321 phdr->type = ossl_quic_enc_level_to_pkt_type(enc_level); 1322 phdr->spin_bit = 0; 1323 phdr->pn_len = txp_determine_pn_len(txp); 1324 phdr->partial = 0; 1325 phdr->fixed = 1; 1326 phdr->reserved = 0; 1327 phdr->version = txp->args.protocol_version; 1328 phdr->dst_conn_id = txp->args.cur_dcid; 1329 phdr->src_conn_id = txp->args.cur_scid; 1330 1331 /* 1332 * We need to know the length of the payload to get an accurate header 1333 * length for non-1RTT packets, because the Length field found in 1334 * Initial/Handshake/0-RTT packets uses a variable-length encoding. However, 1335 * we don't have a good idea of the length of our payload, because the 1336 * length of the payload depends on the room in the datagram after fitting 1337 * the header, which depends on the size of the header. 1338 * 1339 * In general, it does not matter if a packet is slightly shorter (because 1340 * e.g. we predicted use of a 2-byte length field, but ended up only needing 1341 * a 1-byte length field). However this does matter for Initial packets 1342 * which must be at least 1200 bytes, which is also the assumed default MTU; 1343 * therefore in many cases Initial packets will be padded to 1200 bytes, 1344 * which means if we overestimated the header size, we will be short by a 1345 * few bytes and the server will ignore the packet for being too short. In 1346 * this case, however, such packets always *will* be padded to meet 1200 1347 * bytes, which requires a 2-byte length field, so we don't actually need to 1348 * worry about this. Thus we estimate the header length assuming a 2-byte 1349 * length field here, which should in practice work well in all cases. 1350 */ 1351 phdr->len = OSSL_QUIC_VLINT_2B_MAX - phdr->pn_len; 1352 1353 if (enc_level == QUIC_ENC_LEVEL_INITIAL) { 1354 phdr->token = txp->initial_token; 1355 phdr->token_len = txp->initial_token_len; 1356 } else { 1357 phdr->token = NULL; 1358 phdr->token_len = 0; 1359 } 1360 1361 hdr_len = ossl_quic_wire_get_encoded_pkt_hdr_len(phdr->dst_conn_id.id_len, 1362 phdr); 1363 if (hdr_len == 0) 1364 return 0; 1365 1366 /* MDPL: Maximum datagram payload length. */ 1367 mdpl = txp_get_mdpl(txp); 1368 1369 /* 1370 * CMPL: Maximum encoded packet size we can put into this datagram given any 1371 * previous packets coalesced into it. 1372 */ 1373 if (running_total > mdpl) 1374 /* Should not be possible, but if it happens: */ 1375 cmpl = 0; 1376 else 1377 cmpl = mdpl - running_total; 1378 1379 /* CMPPL: Maximum amount we can put into the current packet payload */ 1380 if (!txp_determine_ppl_from_pl(txp, cmpl, enc_level, hdr_len, &geom->cmppl)) 1381 return 0; 1382 1383 geom->cmpl = cmpl; 1384 geom->pkt_overhead = cmpl - geom->cmppl; 1385 geom->archetype = archetype; 1386 return 1; 1387 } 1388 1389 static uint32_t txp_determine_archetype(OSSL_QUIC_TX_PACKETISER *txp, 1390 uint64_t cc_limit) 1391 { 1392 OSSL_ACKM_PROBE_INFO *probe_info 1393 = ossl_ackm_get0_probe_request(txp->args.ackm); 1394 uint32_t pn_space; 1395 1396 /* 1397 * If ACKM has requested probe generation (e.g. due to PTO), we generate a 1398 * Probe-archetype packet. Actually, we determine archetype on a 1399 * per-datagram basis, so if any EL wants a probe, do a pass in which 1400 * we try and generate a probe (if needed) for all ELs. 1401 */ 1402 if (probe_info->anti_deadlock_initial > 0 1403 || probe_info->anti_deadlock_handshake > 0) 1404 return TX_PACKETISER_ARCHETYPE_PROBE; 1405 1406 for (pn_space = QUIC_PN_SPACE_INITIAL; 1407 pn_space < QUIC_PN_SPACE_NUM; 1408 ++pn_space) 1409 if (probe_info->pto[pn_space] > 0) 1410 return TX_PACKETISER_ARCHETYPE_PROBE; 1411 1412 /* 1413 * If we are out of CC budget, we cannot send a normal packet, 1414 * but we can do an ACK-only packet (potentially, if we 1415 * want to send an ACK). 1416 */ 1417 if (cc_limit == 0) 1418 return TX_PACKETISER_ARCHETYPE_ACK_ONLY; 1419 1420 /* All other packets. */ 1421 return TX_PACKETISER_ARCHETYPE_NORMAL; 1422 } 1423 1424 static int txp_should_try_staging(OSSL_QUIC_TX_PACKETISER *txp, 1425 uint32_t enc_level, 1426 uint32_t archetype, 1427 uint64_t cc_limit, 1428 uint32_t *conn_close_enc_level) 1429 { 1430 struct archetype_data a; 1431 uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level); 1432 QUIC_CFQ_ITEM *cfq_item; 1433 1434 if (!ossl_qtx_is_enc_level_provisioned(txp->args.qtx, enc_level)) 1435 return 0; 1436 1437 if (!txp_get_archetype_data(enc_level, archetype, &a)) 1438 return 0; 1439 1440 if (!a.bypass_cc && cc_limit == 0) 1441 /* CC not allowing us to send. */ 1442 return 0; 1443 1444 /* 1445 * We can produce CONNECTION_CLOSE frames on any EL in principle, which 1446 * means we need to choose which EL we would prefer to use. After a 1447 * connection is fully established we have only one provisioned EL and this 1448 * is a non-issue. Where multiple ELs are provisioned, it is possible the 1449 * peer does not have the keys for the EL yet, which suggests in general it 1450 * is preferable to use the lowest EL which is still provisioned. 1451 * 1452 * However (RFC 9000 s. 10.2.3 & 12.5) we are also required to not send 1453 * application CONNECTION_CLOSE frames in non-1-RTT ELs, so as to not 1454 * potentially leak application data on a connection which has yet to be 1455 * authenticated. Thus when we have an application CONNECTION_CLOSE frame 1456 * queued and need to send it on a non-1-RTT EL, we have to convert it 1457 * into a transport CONNECTION_CLOSE frame which contains no application 1458 * data. Since this loses information, it suggests we should use the 1-RTT 1459 * EL to avoid this if possible, even if a lower EL is also available. 1460 * 1461 * At the same time, just because we have the 1-RTT EL provisioned locally 1462 * does not necessarily mean the peer does, for example if a handshake 1463 * CRYPTO frame has been lost. It is fairly important that CONNECTION_CLOSE 1464 * is signalled in a way we know our peer can decrypt, as we stop processing 1465 * connection retransmission logic for real after connection close and 1466 * simply 'blindly' retransmit the same CONNECTION_CLOSE frame. 1467 * 1468 * This is not a major concern for clients, since if a client has a 1-RTT EL 1469 * provisioned the server is guaranteed to also have a 1-RTT EL provisioned. 1470 * 1471 * TODO(QUIC FUTURE): Revisit this when when have reached a decision on how 1472 * best to implement this 1473 */ 1474 if (*conn_close_enc_level > enc_level 1475 && *conn_close_enc_level != QUIC_ENC_LEVEL_1RTT) 1476 *conn_close_enc_level = enc_level; 1477 1478 /* Do we need to send a PTO probe? */ 1479 if (a.allow_force_ack_eliciting) { 1480 OSSL_ACKM_PROBE_INFO *probe_info 1481 = ossl_ackm_get0_probe_request(txp->args.ackm); 1482 1483 if ((enc_level == QUIC_ENC_LEVEL_INITIAL 1484 && probe_info->anti_deadlock_initial > 0) 1485 || (enc_level == QUIC_ENC_LEVEL_HANDSHAKE 1486 && probe_info->anti_deadlock_handshake > 0) 1487 || probe_info->pto[pn_space] > 0) 1488 return 1; 1489 } 1490 1491 /* Does the crypto stream for this EL want to produce anything? */ 1492 if (a.allow_crypto && sstream_is_pending(txp->args.crypto[pn_space])) 1493 return 1; 1494 1495 /* Does the ACKM for this PN space want to produce anything? */ 1496 if (a.allow_ack && (ossl_ackm_is_ack_desired(txp->args.ackm, pn_space) 1497 || (txp->want_ack & (1UL << pn_space)) != 0)) 1498 return 1; 1499 1500 /* Do we need to force emission of an ACK-eliciting packet? */ 1501 if (a.allow_force_ack_eliciting 1502 && (txp->force_ack_eliciting & (1UL << pn_space)) != 0) 1503 return 1; 1504 1505 /* Does the connection-level RXFC want to produce a frame? */ 1506 if (a.allow_conn_fc && (txp->want_max_data 1507 || ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 0))) 1508 return 1; 1509 1510 /* Do we want to produce a MAX_STREAMS frame? */ 1511 if (a.allow_conn_fc 1512 && (txp->want_max_streams_bidi 1513 || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_bidi_rxfc, 1514 0) 1515 || txp->want_max_streams_uni 1516 || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_uni_rxfc, 1517 0))) 1518 return 1; 1519 1520 /* Do we want to produce a HANDSHAKE_DONE frame? */ 1521 if (a.allow_handshake_done && txp->want_handshake_done) 1522 return 1; 1523 1524 /* Do we want to produce a CONNECTION_CLOSE frame? */ 1525 if (a.allow_conn_close && txp->want_conn_close && 1526 *conn_close_enc_level == enc_level) 1527 /* 1528 * This is a bit of a special case since CONNECTION_CLOSE can appear in 1529 * most packet types, and when we decide we want to send it this status 1530 * isn't tied to a specific EL. So if we want to send it, we send it 1531 * only on the lowest non-dropped EL. 1532 */ 1533 return 1; 1534 1535 /* Does the CFQ have any frames queued for this PN space? */ 1536 if (enc_level != QUIC_ENC_LEVEL_0RTT) 1537 for (cfq_item = ossl_quic_cfq_get_priority_head(txp->args.cfq, pn_space); 1538 cfq_item != NULL; 1539 cfq_item = ossl_quic_cfq_item_get_priority_next(cfq_item, pn_space)) { 1540 uint64_t frame_type = ossl_quic_cfq_item_get_frame_type(cfq_item); 1541 1542 switch (frame_type) { 1543 case OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID: 1544 if (a.allow_new_conn_id) 1545 return 1; 1546 break; 1547 case OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID: 1548 if (a.allow_retire_conn_id) 1549 return 1; 1550 break; 1551 case OSSL_QUIC_FRAME_TYPE_NEW_TOKEN: 1552 if (a.allow_new_token) 1553 return 1; 1554 break; 1555 case OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE: 1556 if (a.allow_path_response) 1557 return 1; 1558 break; 1559 default: 1560 if (a.allow_cfq_other) 1561 return 1; 1562 break; 1563 } 1564 } 1565 1566 if (a.allow_stream_rel && txp->handshake_complete) { 1567 QUIC_STREAM_ITER it; 1568 1569 /* If there are any active streams, 0/1-RTT wants to produce a packet. 1570 * Whether a stream is on the active list is required to be precise 1571 * (i.e., a stream is never on the active list if we cannot produce a 1572 * frame for it), and all stream-related frames are governed by 1573 * a.allow_stream_rel (i.e., if we can send one type of stream-related 1574 * frame, we can send any of them), so we don't need to inspect 1575 * individual streams on the active list, just confirm that the active 1576 * list is non-empty. 1577 */ 1578 ossl_quic_stream_iter_init(&it, txp->args.qsm, 0); 1579 if (it.stream != NULL) 1580 return 1; 1581 } 1582 1583 return 0; 1584 } 1585 1586 static int sstream_is_pending(QUIC_SSTREAM *sstream) 1587 { 1588 OSSL_QUIC_FRAME_STREAM hdr; 1589 OSSL_QTX_IOVEC iov[2]; 1590 size_t num_iov = OSSL_NELEM(iov); 1591 1592 return ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov, &num_iov); 1593 } 1594 1595 /* Determine how many bytes we should use for the encoded PN. */ 1596 static size_t txp_determine_pn_len(OSSL_QUIC_TX_PACKETISER *txp) 1597 { 1598 return 4; /* TODO(QUIC FUTURE) */ 1599 } 1600 1601 /* Determine plaintext packet payload length from payload length. */ 1602 static int txp_determine_ppl_from_pl(OSSL_QUIC_TX_PACKETISER *txp, 1603 size_t pl, 1604 uint32_t enc_level, 1605 size_t hdr_len, 1606 size_t *r) 1607 { 1608 if (pl < hdr_len) 1609 return 0; 1610 1611 pl -= hdr_len; 1612 1613 if (!ossl_qtx_calculate_plaintext_payload_len(txp->args.qtx, enc_level, 1614 pl, &pl)) 1615 return 0; 1616 1617 *r = pl; 1618 return 1; 1619 } 1620 1621 static size_t txp_get_mdpl(OSSL_QUIC_TX_PACKETISER *txp) 1622 { 1623 return ossl_qtx_get_mdpl(txp->args.qtx); 1624 } 1625 1626 static QUIC_SSTREAM *get_sstream_by_id(uint64_t stream_id, uint32_t pn_space, 1627 void *arg) 1628 { 1629 OSSL_QUIC_TX_PACKETISER *txp = arg; 1630 QUIC_STREAM *s; 1631 1632 if (stream_id == UINT64_MAX) 1633 return txp->args.crypto[pn_space]; 1634 1635 s = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id); 1636 if (s == NULL) 1637 return NULL; 1638 1639 return s->sstream; 1640 } 1641 1642 static void on_regen_notify(uint64_t frame_type, uint64_t stream_id, 1643 QUIC_TXPIM_PKT *pkt, void *arg) 1644 { 1645 OSSL_QUIC_TX_PACKETISER *txp = arg; 1646 1647 switch (frame_type) { 1648 case OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE: 1649 txp->want_handshake_done = 1; 1650 break; 1651 case OSSL_QUIC_FRAME_TYPE_MAX_DATA: 1652 txp->want_max_data = 1; 1653 break; 1654 case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI: 1655 txp->want_max_streams_bidi = 1; 1656 break; 1657 case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_UNI: 1658 txp->want_max_streams_uni = 1; 1659 break; 1660 case OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN: 1661 txp->want_ack |= (1UL << pkt->ackm_pkt.pkt_space); 1662 break; 1663 case OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA: 1664 { 1665 QUIC_STREAM *s 1666 = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id); 1667 1668 if (s == NULL) 1669 return; 1670 1671 s->want_max_stream_data = 1; 1672 ossl_quic_stream_map_update_state(txp->args.qsm, s); 1673 } 1674 break; 1675 case OSSL_QUIC_FRAME_TYPE_STOP_SENDING: 1676 { 1677 QUIC_STREAM *s 1678 = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id); 1679 1680 if (s == NULL) 1681 return; 1682 1683 ossl_quic_stream_map_schedule_stop_sending(txp->args.qsm, s); 1684 } 1685 break; 1686 case OSSL_QUIC_FRAME_TYPE_RESET_STREAM: 1687 { 1688 QUIC_STREAM *s 1689 = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id); 1690 1691 if (s == NULL) 1692 return; 1693 1694 s->want_reset_stream = 1; 1695 ossl_quic_stream_map_update_state(txp->args.qsm, s); 1696 } 1697 break; 1698 default: 1699 assert(0); 1700 break; 1701 } 1702 } 1703 1704 static int txp_need_ping(OSSL_QUIC_TX_PACKETISER *txp, 1705 uint32_t pn_space, 1706 const struct archetype_data *adata) 1707 { 1708 return adata->allow_ping 1709 && (adata->require_ack_eliciting 1710 || (txp->force_ack_eliciting & (1UL << pn_space)) != 0); 1711 } 1712 1713 static int txp_pkt_init(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp, 1714 uint32_t enc_level, uint32_t archetype, 1715 size_t running_total) 1716 { 1717 uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level); 1718 1719 if (!txp_determine_geometry(txp, archetype, enc_level, 1720 running_total, &pkt->phdr, &pkt->geom)) 1721 return 0; 1722 1723 /* 1724 * Initialise TX helper. If we must be ACK eliciting, reserve 1 byte for 1725 * PING. 1726 */ 1727 if (!tx_helper_init(&pkt->h, txp, enc_level, 1728 pkt->geom.cmppl, 1729 txp_need_ping(txp, pn_space, &pkt->geom.adata) ? 1 : 0)) 1730 return 0; 1731 1732 pkt->h_valid = 1; 1733 pkt->tpkt = NULL; 1734 pkt->stream_head = NULL; 1735 pkt->force_pad = 0; 1736 return 1; 1737 } 1738 1739 static void txp_pkt_cleanup(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp) 1740 { 1741 if (!pkt->h_valid) 1742 return; 1743 1744 tx_helper_cleanup(&pkt->h); 1745 pkt->h_valid = 0; 1746 1747 if (pkt->tpkt != NULL) { 1748 ossl_quic_txpim_pkt_release(txp->args.txpim, pkt->tpkt); 1749 pkt->tpkt = NULL; 1750 } 1751 } 1752 1753 static int txp_pkt_postgen_update_pkt_overhead(struct txp_pkt *pkt, 1754 OSSL_QUIC_TX_PACKETISER *txp) 1755 { 1756 /* 1757 * After we have staged and generated our packets, but before we commit 1758 * them, it is possible for the estimated packet overhead (packet header + 1759 * AEAD tag size) to shrink slightly because we generated a short packet 1760 * whose which can be represented in fewer bytes as a variable-length 1761 * integer than we were (pessimistically) budgeting for. We need to account 1762 * for this to ensure that we get our padding calculation exactly right. 1763 * 1764 * Update pkt_overhead to be accurate now that we know how much data is 1765 * going in a packet. 1766 */ 1767 size_t hdr_len, ciphertext_len; 1768 1769 if (pkt->h.enc_level == QUIC_ENC_LEVEL_INITIAL) 1770 /* 1771 * Don't update overheads for the INITIAL EL - we have not finished 1772 * appending padding to it and would potentially miscalculate the 1773 * correct padding if we now update the pkt_overhead field to switch to 1774 * e.g. a 1-byte length field in the packet header. Since we are padding 1775 * to QUIC_MIN_INITIAL_DGRAM_LEN which requires a 2-byte length field, 1776 * this is guaranteed to be moot anyway. See comment in 1777 * txp_determine_geometry for more information. 1778 */ 1779 return 1; 1780 1781 if (!ossl_qtx_calculate_ciphertext_payload_len(txp->args.qtx, pkt->h.enc_level, 1782 pkt->h.bytes_appended, 1783 &ciphertext_len)) 1784 return 0; 1785 1786 pkt->phdr.len = ciphertext_len; 1787 1788 hdr_len = ossl_quic_wire_get_encoded_pkt_hdr_len(pkt->phdr.dst_conn_id.id_len, 1789 &pkt->phdr); 1790 1791 pkt->geom.pkt_overhead = hdr_len + ciphertext_len - pkt->h.bytes_appended; 1792 return 1; 1793 } 1794 1795 static void on_confirm_notify(uint64_t frame_type, uint64_t stream_id, 1796 QUIC_TXPIM_PKT *pkt, void *arg) 1797 { 1798 OSSL_QUIC_TX_PACKETISER *txp = arg; 1799 1800 switch (frame_type) { 1801 case OSSL_QUIC_FRAME_TYPE_STOP_SENDING: 1802 { 1803 QUIC_STREAM *s 1804 = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id); 1805 1806 if (s == NULL) 1807 return; 1808 1809 s->acked_stop_sending = 1; 1810 ossl_quic_stream_map_update_state(txp->args.qsm, s); 1811 } 1812 break; 1813 case OSSL_QUIC_FRAME_TYPE_RESET_STREAM: 1814 { 1815 QUIC_STREAM *s 1816 = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id); 1817 1818 if (s == NULL) 1819 return; 1820 1821 /* 1822 * We must already be in RESET_SENT or RESET_RECVD if we are 1823 * here, so we don't need to check state here. 1824 */ 1825 ossl_quic_stream_map_notify_reset_stream_acked(txp->args.qsm, s); 1826 ossl_quic_stream_map_update_state(txp->args.qsm, s); 1827 } 1828 break; 1829 default: 1830 assert(0); 1831 break; 1832 } 1833 } 1834 1835 static int txp_pkt_append_padding(struct txp_pkt *pkt, 1836 OSSL_QUIC_TX_PACKETISER *txp, size_t num_bytes) 1837 { 1838 WPACKET *wpkt; 1839 1840 if (num_bytes == 0) 1841 return 1; 1842 1843 if (!ossl_assert(pkt->h_valid)) 1844 return 0; 1845 1846 if (!ossl_assert(pkt->tpkt != NULL)) 1847 return 0; 1848 1849 wpkt = tx_helper_begin(&pkt->h); 1850 if (wpkt == NULL) 1851 return 0; 1852 1853 if (!ossl_quic_wire_encode_padding(wpkt, num_bytes)) { 1854 tx_helper_rollback(&pkt->h); 1855 return 0; 1856 } 1857 1858 if (!tx_helper_commit(&pkt->h)) 1859 return 0; 1860 1861 pkt->tpkt->ackm_pkt.num_bytes += num_bytes; 1862 /* Cannot be non-inflight if we have a PADDING frame */ 1863 pkt->tpkt->ackm_pkt.is_inflight = 1; 1864 return 1; 1865 } 1866 1867 static void on_sstream_updated(uint64_t stream_id, void *arg) 1868 { 1869 OSSL_QUIC_TX_PACKETISER *txp = arg; 1870 QUIC_STREAM *s; 1871 1872 s = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id); 1873 if (s == NULL) 1874 return; 1875 1876 ossl_quic_stream_map_update_state(txp->args.qsm, s); 1877 } 1878 1879 /* 1880 * Returns 1 if we can send that many bytes in closing state, 0 otherwise. 1881 * Also maintains the bytes sent state if it returns a success. 1882 */ 1883 static int try_commit_conn_close(OSSL_QUIC_TX_PACKETISER *txp, size_t n) 1884 { 1885 int res; 1886 1887 /* We can always send the first connection close frame */ 1888 if (txp->closing_bytes_recv == 0) 1889 return 1; 1890 1891 /* 1892 * RFC 9000 s. 10.2.1 Closing Connection State: 1893 * To avoid being used for an amplification attack, such 1894 * endpoints MUST limit the cumulative size of packets it sends 1895 * to three times the cumulative size of the packets that are 1896 * received and attributed to the connection. 1897 * and: 1898 * An endpoint in the closing state MUST either discard packets 1899 * received from an unvalidated address or limit the cumulative 1900 * size of packets it sends to an unvalidated address to three 1901 * times the size of packets it receives from that address. 1902 */ 1903 res = txp->closing_bytes_xmit + n <= txp->closing_bytes_recv * 3; 1904 1905 /* 1906 * Attribute the bytes to the connection, if we are allowed to send them 1907 * and this isn't the first closing frame. 1908 */ 1909 if (res && txp->closing_bytes_recv != 0) 1910 txp->closing_bytes_xmit += n; 1911 return res; 1912 } 1913 1914 void ossl_quic_tx_packetiser_record_received_closing_bytes( 1915 OSSL_QUIC_TX_PACKETISER *txp, size_t n) 1916 { 1917 txp->closing_bytes_recv += n; 1918 } 1919 1920 static int txp_generate_pre_token(OSSL_QUIC_TX_PACKETISER *txp, 1921 struct txp_pkt *pkt, 1922 int chosen_for_conn_close, 1923 int *can_be_non_inflight) 1924 { 1925 const uint32_t enc_level = pkt->h.enc_level; 1926 const uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level); 1927 const struct archetype_data *a = &pkt->geom.adata; 1928 QUIC_TXPIM_PKT *tpkt = pkt->tpkt; 1929 struct tx_helper *h = &pkt->h; 1930 const OSSL_QUIC_FRAME_ACK *ack; 1931 OSSL_QUIC_FRAME_ACK ack2; 1932 1933 tpkt->ackm_pkt.largest_acked = QUIC_PN_INVALID; 1934 1935 /* ACK Frames (Regenerate) */ 1936 if (a->allow_ack 1937 && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_ACK 1938 && (((txp->want_ack & (1UL << pn_space)) != 0) 1939 || ossl_ackm_is_ack_desired(txp->args.ackm, pn_space)) 1940 && (ack = ossl_ackm_get_ack_frame(txp->args.ackm, pn_space)) != NULL) { 1941 WPACKET *wpkt = tx_helper_begin(h); 1942 1943 if (wpkt == NULL) 1944 return 0; 1945 1946 /* We do not currently support ECN */ 1947 ack2 = *ack; 1948 ack2.ecn_present = 0; 1949 1950 if (ossl_quic_wire_encode_frame_ack(wpkt, 1951 txp->args.ack_delay_exponent, 1952 &ack2)) { 1953 if (!tx_helper_commit(h)) 1954 return 0; 1955 1956 tpkt->had_ack_frame = 1; 1957 1958 if (ack->num_ack_ranges > 0) 1959 tpkt->ackm_pkt.largest_acked = ack->ack_ranges[0].end; 1960 1961 if (txp->ack_tx_cb != NULL) 1962 txp->ack_tx_cb(&ack2, pn_space, txp->ack_tx_cb_arg); 1963 } else { 1964 tx_helper_rollback(h); 1965 } 1966 } 1967 1968 /* CONNECTION_CLOSE Frames (Regenerate) */ 1969 if (a->allow_conn_close && txp->want_conn_close && chosen_for_conn_close) { 1970 WPACKET *wpkt = tx_helper_begin(h); 1971 OSSL_QUIC_FRAME_CONN_CLOSE f, *pf = &txp->conn_close_frame; 1972 size_t l; 1973 1974 if (wpkt == NULL) 1975 return 0; 1976 1977 /* 1978 * Application CONNECTION_CLOSE frames may only be sent in the 1979 * Application PN space, as otherwise they may be sent before a 1980 * connection is authenticated and leak application data. Therefore, if 1981 * we need to send a CONNECTION_CLOSE frame in another PN space and were 1982 * given an application CONNECTION_CLOSE frame, convert it into a 1983 * transport CONNECTION_CLOSE frame, removing any sensitive application 1984 * data. 1985 * 1986 * RFC 9000 s. 10.2.3: "A CONNECTION_CLOSE of type 0x1d MUST be replaced 1987 * by a CONNECTION_CLOSE of type 0x1c when sending the frame in Initial 1988 * or Handshake packets. Otherwise, information about the application 1989 * state might be revealed. Endpoints MUST clear the value of the Reason 1990 * Phrase field and SHOULD use the APPLICATION_ERROR code when 1991 * converting to a CONNECTION_CLOSE of type 0x1c." 1992 */ 1993 if (pn_space != QUIC_PN_SPACE_APP && pf->is_app) { 1994 pf = &f; 1995 pf->is_app = 0; 1996 pf->frame_type = 0; 1997 pf->error_code = OSSL_QUIC_ERR_APPLICATION_ERROR; 1998 pf->reason = NULL; 1999 pf->reason_len = 0; 2000 } 2001 2002 if (ossl_quic_wire_encode_frame_conn_close(wpkt, pf) 2003 && WPACKET_get_total_written(wpkt, &l) 2004 && try_commit_conn_close(txp, l)) { 2005 if (!tx_helper_commit(h)) 2006 return 0; 2007 2008 tpkt->had_conn_close = 1; 2009 *can_be_non_inflight = 0; 2010 } else { 2011 tx_helper_rollback(h); 2012 } 2013 } 2014 2015 return 1; 2016 } 2017 2018 static int try_len(size_t space_left, size_t orig_len, 2019 size_t base_hdr_len, size_t lenbytes, 2020 uint64_t maxn, size_t *hdr_len, size_t *payload_len) 2021 { 2022 size_t n; 2023 size_t maxn_ = maxn > SIZE_MAX ? SIZE_MAX : (size_t)maxn; 2024 2025 *hdr_len = base_hdr_len + lenbytes; 2026 2027 if (orig_len == 0 && space_left >= *hdr_len) { 2028 *payload_len = 0; 2029 return 1; 2030 } 2031 2032 n = orig_len; 2033 if (n > maxn_) 2034 n = maxn_; 2035 if (n + *hdr_len > space_left) 2036 n = (space_left >= *hdr_len) ? space_left - *hdr_len : 0; 2037 2038 *payload_len = n; 2039 return n > 0; 2040 } 2041 2042 static int determine_len(size_t space_left, size_t orig_len, 2043 size_t base_hdr_len, 2044 uint64_t *hlen, uint64_t *len) 2045 { 2046 int ok = 0; 2047 size_t chosen_payload_len = 0; 2048 size_t chosen_hdr_len = 0; 2049 size_t payload_len[4], hdr_len[4]; 2050 int i, valid[4] = {0}; 2051 2052 valid[0] = try_len(space_left, orig_len, base_hdr_len, 2053 1, OSSL_QUIC_VLINT_1B_MAX, 2054 &hdr_len[0], &payload_len[0]); 2055 valid[1] = try_len(space_left, orig_len, base_hdr_len, 2056 2, OSSL_QUIC_VLINT_2B_MAX, 2057 &hdr_len[1], &payload_len[1]); 2058 valid[2] = try_len(space_left, orig_len, base_hdr_len, 2059 4, OSSL_QUIC_VLINT_4B_MAX, 2060 &hdr_len[2], &payload_len[2]); 2061 valid[3] = try_len(space_left, orig_len, base_hdr_len, 2062 8, OSSL_QUIC_VLINT_8B_MAX, 2063 &hdr_len[3], &payload_len[3]); 2064 2065 for (i = OSSL_NELEM(valid) - 1; i >= 0; --i) 2066 if (valid[i] && payload_len[i] >= chosen_payload_len) { 2067 chosen_payload_len = payload_len[i]; 2068 chosen_hdr_len = hdr_len[i]; 2069 ok = 1; 2070 } 2071 2072 *hlen = chosen_hdr_len; 2073 *len = chosen_payload_len; 2074 return ok; 2075 } 2076 2077 /* 2078 * Given a CRYPTO frame header with accurate chdr->len and a budget 2079 * (space_left), try to find the optimal value of chdr->len to fill as much of 2080 * the budget as possible. This is slightly hairy because larger values of 2081 * chdr->len cause larger encoded sizes of the length field of the frame, which 2082 * in turn mean less space available for payload data. We check all possible 2083 * encodings and choose the optimal encoding. 2084 */ 2085 static int determine_crypto_len(struct tx_helper *h, 2086 OSSL_QUIC_FRAME_CRYPTO *chdr, 2087 size_t space_left, 2088 uint64_t *hlen, 2089 uint64_t *len) 2090 { 2091 size_t orig_len; 2092 size_t base_hdr_len; /* CRYPTO header length without length field */ 2093 2094 if (chdr->len > SIZE_MAX) 2095 return 0; 2096 2097 orig_len = (size_t)chdr->len; 2098 2099 chdr->len = 0; 2100 base_hdr_len = ossl_quic_wire_get_encoded_frame_len_crypto_hdr(chdr); 2101 chdr->len = orig_len; 2102 if (base_hdr_len == 0) 2103 return 0; 2104 2105 --base_hdr_len; 2106 2107 return determine_len(space_left, orig_len, base_hdr_len, hlen, len); 2108 } 2109 2110 static int determine_stream_len(struct tx_helper *h, 2111 OSSL_QUIC_FRAME_STREAM *shdr, 2112 size_t space_left, 2113 uint64_t *hlen, 2114 uint64_t *len) 2115 { 2116 size_t orig_len; 2117 size_t base_hdr_len; /* STREAM header length without length field */ 2118 2119 if (shdr->len > SIZE_MAX) 2120 return 0; 2121 2122 orig_len = (size_t)shdr->len; 2123 2124 shdr->len = 0; 2125 base_hdr_len = ossl_quic_wire_get_encoded_frame_len_stream_hdr(shdr); 2126 shdr->len = orig_len; 2127 if (base_hdr_len == 0) 2128 return 0; 2129 2130 if (shdr->has_explicit_len) 2131 --base_hdr_len; 2132 2133 return determine_len(space_left, orig_len, base_hdr_len, hlen, len); 2134 } 2135 2136 static int txp_generate_crypto_frames(OSSL_QUIC_TX_PACKETISER *txp, 2137 struct txp_pkt *pkt, 2138 int *have_ack_eliciting) 2139 { 2140 const uint32_t enc_level = pkt->h.enc_level; 2141 const uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level); 2142 QUIC_TXPIM_PKT *tpkt = pkt->tpkt; 2143 struct tx_helper *h = &pkt->h; 2144 size_t num_stream_iovec; 2145 OSSL_QUIC_FRAME_STREAM shdr = {0}; 2146 OSSL_QUIC_FRAME_CRYPTO chdr = {0}; 2147 OSSL_QTX_IOVEC iov[2]; 2148 uint64_t hdr_bytes; 2149 WPACKET *wpkt; 2150 QUIC_TXPIM_CHUNK chunk = {0}; 2151 size_t i, space_left; 2152 2153 for (i = 0;; ++i) { 2154 space_left = tx_helper_get_space_left(h); 2155 2156 if (space_left < MIN_FRAME_SIZE_CRYPTO) 2157 return 1; /* no point trying */ 2158 2159 /* Do we have any CRYPTO data waiting? */ 2160 num_stream_iovec = OSSL_NELEM(iov); 2161 if (!ossl_quic_sstream_get_stream_frame(txp->args.crypto[pn_space], 2162 i, &shdr, iov, 2163 &num_stream_iovec)) 2164 return 1; /* nothing to do */ 2165 2166 /* Convert STREAM frame header to CRYPTO frame header */ 2167 chdr.offset = shdr.offset; 2168 chdr.len = shdr.len; 2169 2170 if (chdr.len == 0) 2171 return 1; /* nothing to do */ 2172 2173 /* Find best fit (header length, payload length) combination. */ 2174 if (!determine_crypto_len(h, &chdr, space_left, &hdr_bytes, 2175 &chdr.len)) 2176 return 1; /* can't fit anything */ 2177 2178 /* 2179 * Truncate IOVs to match our chosen length. 2180 * 2181 * The length cannot be more than SIZE_MAX because this length comes 2182 * from our send stream buffer. 2183 */ 2184 ossl_quic_sstream_adjust_iov((size_t)chdr.len, iov, num_stream_iovec); 2185 2186 /* 2187 * Ensure we have enough iovecs allocated (1 for the header, up to 2 for 2188 * the stream data.) 2189 */ 2190 if (!txp_el_ensure_iovec(&txp->el[enc_level], h->num_iovec + 3)) 2191 return 0; /* alloc error */ 2192 2193 /* Encode the header. */ 2194 wpkt = tx_helper_begin(h); 2195 if (wpkt == NULL) 2196 return 0; /* alloc error */ 2197 2198 if (!ossl_quic_wire_encode_frame_crypto_hdr(wpkt, &chdr)) { 2199 tx_helper_rollback(h); 2200 return 1; /* can't fit */ 2201 } 2202 2203 if (!tx_helper_commit(h)) 2204 return 0; /* alloc error */ 2205 2206 /* Add payload iovecs to the helper (infallible). */ 2207 for (i = 0; i < num_stream_iovec; ++i) 2208 tx_helper_append_iovec(h, iov[i].buf, iov[i].buf_len); 2209 2210 *have_ack_eliciting = 1; 2211 tx_helper_unrestrict(h); /* no longer need PING */ 2212 2213 /* Log chunk to TXPIM. */ 2214 chunk.stream_id = UINT64_MAX; /* crypto stream */ 2215 chunk.start = chdr.offset; 2216 chunk.end = chdr.offset + chdr.len - 1; 2217 chunk.has_fin = 0; /* Crypto stream never ends */ 2218 if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk)) 2219 return 0; /* alloc error */ 2220 } 2221 } 2222 2223 struct chunk_info { 2224 OSSL_QUIC_FRAME_STREAM shdr; 2225 uint64_t orig_len; 2226 OSSL_QTX_IOVEC iov[2]; 2227 size_t num_stream_iovec; 2228 int valid; 2229 }; 2230 2231 static int txp_plan_stream_chunk(OSSL_QUIC_TX_PACKETISER *txp, 2232 struct tx_helper *h, 2233 QUIC_SSTREAM *sstream, 2234 QUIC_TXFC *stream_txfc, 2235 size_t skip, 2236 struct chunk_info *chunk, 2237 uint64_t consumed) 2238 { 2239 uint64_t fc_credit, fc_swm, fc_limit; 2240 2241 chunk->num_stream_iovec = OSSL_NELEM(chunk->iov); 2242 chunk->valid = ossl_quic_sstream_get_stream_frame(sstream, skip, 2243 &chunk->shdr, 2244 chunk->iov, 2245 &chunk->num_stream_iovec); 2246 if (!chunk->valid) 2247 return 1; 2248 2249 if (!ossl_assert(chunk->shdr.len > 0 || chunk->shdr.is_fin)) 2250 /* Should only have 0-length chunk if FIN */ 2251 return 0; 2252 2253 chunk->orig_len = chunk->shdr.len; 2254 2255 /* Clamp according to connection and stream-level TXFC. */ 2256 fc_credit = ossl_quic_txfc_get_credit(stream_txfc, consumed); 2257 fc_swm = ossl_quic_txfc_get_swm(stream_txfc); 2258 fc_limit = fc_swm + fc_credit; 2259 2260 if (chunk->shdr.len > 0 && chunk->shdr.offset + chunk->shdr.len > fc_limit) { 2261 chunk->shdr.len = (fc_limit <= chunk->shdr.offset) 2262 ? 0 : fc_limit - chunk->shdr.offset; 2263 chunk->shdr.is_fin = 0; 2264 } 2265 2266 if (chunk->shdr.len == 0 && !chunk->shdr.is_fin) { 2267 /* 2268 * Nothing to do due to TXFC. Since SSTREAM returns chunks in ascending 2269 * order of offset we don't need to check any later chunks, so stop 2270 * iterating here. 2271 */ 2272 chunk->valid = 0; 2273 return 1; 2274 } 2275 2276 return 1; 2277 } 2278 2279 /* 2280 * Returns 0 on fatal error (e.g. allocation failure), 1 on success. 2281 * *packet_full is set to 1 if there is no longer enough room for another STREAM 2282 * frame. 2283 */ 2284 static int txp_generate_stream_frames(OSSL_QUIC_TX_PACKETISER *txp, 2285 struct txp_pkt *pkt, 2286 uint64_t id, 2287 QUIC_SSTREAM *sstream, 2288 QUIC_TXFC *stream_txfc, 2289 QUIC_STREAM *next_stream, 2290 int *have_ack_eliciting, 2291 int *packet_full, 2292 uint64_t *new_credit_consumed, 2293 uint64_t conn_consumed) 2294 { 2295 int rc = 0; 2296 struct chunk_info chunks[2] = {0}; 2297 const uint32_t enc_level = pkt->h.enc_level; 2298 QUIC_TXPIM_PKT *tpkt = pkt->tpkt; 2299 struct tx_helper *h = &pkt->h; 2300 OSSL_QUIC_FRAME_STREAM *shdr; 2301 WPACKET *wpkt; 2302 QUIC_TXPIM_CHUNK chunk; 2303 size_t i, j, space_left; 2304 int can_fill_payload, use_explicit_len; 2305 int could_have_following_chunk; 2306 uint64_t orig_len; 2307 uint64_t hdr_len_implicit, payload_len_implicit; 2308 uint64_t hdr_len_explicit, payload_len_explicit; 2309 uint64_t fc_swm, fc_new_hwm; 2310 2311 fc_swm = ossl_quic_txfc_get_swm(stream_txfc); 2312 fc_new_hwm = fc_swm; 2313 2314 /* 2315 * Load the first two chunks if any offered by the send stream. We retrieve 2316 * the next chunk in advance so we can determine if we need to send any more 2317 * chunks from the same stream after this one, which is needed when 2318 * determining when we can use an implicit length in a STREAM frame. 2319 */ 2320 for (i = 0; i < 2; ++i) { 2321 if (!txp_plan_stream_chunk(txp, h, sstream, stream_txfc, i, &chunks[i], 2322 conn_consumed)) 2323 goto err; 2324 2325 if (i == 0 && !chunks[i].valid) { 2326 /* No chunks, nothing to do. */ 2327 rc = 1; 2328 goto err; 2329 } 2330 chunks[i].shdr.stream_id = id; 2331 } 2332 2333 for (i = 0;; ++i) { 2334 space_left = tx_helper_get_space_left(h); 2335 2336 if (!chunks[i % 2].valid) { 2337 /* Out of chunks; we're done. */ 2338 rc = 1; 2339 goto err; 2340 } 2341 2342 if (space_left < MIN_FRAME_SIZE_STREAM) { 2343 *packet_full = 1; 2344 rc = 1; 2345 goto err; 2346 } 2347 2348 if (!ossl_assert(!h->done_implicit)) 2349 /* 2350 * Logic below should have ensured we didn't append an 2351 * implicit-length unless we filled the packet or didn't have 2352 * another stream to handle, so this should not be possible. 2353 */ 2354 goto err; 2355 2356 shdr = &chunks[i % 2].shdr; 2357 orig_len = chunks[i % 2].orig_len; 2358 if (i > 0) 2359 /* Load next chunk for lookahead. */ 2360 if (!txp_plan_stream_chunk(txp, h, sstream, stream_txfc, i + 1, 2361 &chunks[(i + 1) % 2], conn_consumed)) 2362 goto err; 2363 2364 /* 2365 * Find best fit (header length, payload length) combination for if we 2366 * use an implicit length. 2367 */ 2368 shdr->has_explicit_len = 0; 2369 hdr_len_implicit = payload_len_implicit = 0; 2370 if (!determine_stream_len(h, shdr, space_left, 2371 &hdr_len_implicit, &payload_len_implicit)) { 2372 *packet_full = 1; 2373 rc = 1; 2374 goto err; /* can't fit anything */ 2375 } 2376 2377 /* 2378 * If there is a next stream, we don't use the implicit length so we can 2379 * add more STREAM frames after this one, unless there is enough data 2380 * for this STREAM frame to fill the packet. 2381 */ 2382 can_fill_payload = (hdr_len_implicit + payload_len_implicit 2383 >= space_left); 2384 2385 /* 2386 * Is there is a stream after this one, or another chunk pending 2387 * transmission in this stream? 2388 */ 2389 could_have_following_chunk 2390 = (next_stream != NULL || chunks[(i + 1) % 2].valid); 2391 2392 /* Choose between explicit or implicit length representations. */ 2393 use_explicit_len = !((can_fill_payload || !could_have_following_chunk) 2394 && !pkt->force_pad); 2395 2396 if (use_explicit_len) { 2397 /* 2398 * Find best fit (header length, payload length) combination for if 2399 * we use an explicit length. 2400 */ 2401 shdr->has_explicit_len = 1; 2402 hdr_len_explicit = payload_len_explicit = 0; 2403 if (!determine_stream_len(h, shdr, space_left, 2404 &hdr_len_explicit, &payload_len_explicit)) { 2405 *packet_full = 1; 2406 rc = 1; 2407 goto err; /* can't fit anything */ 2408 } 2409 2410 shdr->len = payload_len_explicit; 2411 } else { 2412 *packet_full = 1; 2413 shdr->has_explicit_len = 0; 2414 shdr->len = payload_len_implicit; 2415 } 2416 2417 /* If this is a FIN, don't keep filling the packet with more FINs. */ 2418 if (shdr->is_fin) 2419 chunks[(i + 1) % 2].valid = 0; 2420 2421 /* 2422 * We are now committed to our length (shdr->len can't change). 2423 * If we truncated the chunk, clear the FIN bit. 2424 */ 2425 if (shdr->len < orig_len) 2426 shdr->is_fin = 0; 2427 2428 /* Truncate IOVs to match our chosen length. */ 2429 ossl_quic_sstream_adjust_iov((size_t)shdr->len, chunks[i % 2].iov, 2430 chunks[i % 2].num_stream_iovec); 2431 2432 /* 2433 * Ensure we have enough iovecs allocated (1 for the header, up to 2 for 2434 * the stream data.) 2435 */ 2436 if (!txp_el_ensure_iovec(&txp->el[enc_level], h->num_iovec + 3)) 2437 goto err; /* alloc error */ 2438 2439 /* Encode the header. */ 2440 wpkt = tx_helper_begin(h); 2441 if (wpkt == NULL) 2442 goto err; /* alloc error */ 2443 2444 if (!ossl_assert(ossl_quic_wire_encode_frame_stream_hdr(wpkt, shdr))) { 2445 /* (Should not be possible.) */ 2446 tx_helper_rollback(h); 2447 *packet_full = 1; 2448 rc = 1; 2449 goto err; /* can't fit */ 2450 } 2451 2452 if (!tx_helper_commit(h)) 2453 goto err; /* alloc error */ 2454 2455 /* Add payload iovecs to the helper (infallible). */ 2456 for (j = 0; j < chunks[i % 2].num_stream_iovec; ++j) 2457 tx_helper_append_iovec(h, chunks[i % 2].iov[j].buf, 2458 chunks[i % 2].iov[j].buf_len); 2459 2460 *have_ack_eliciting = 1; 2461 tx_helper_unrestrict(h); /* no longer need PING */ 2462 if (!shdr->has_explicit_len) 2463 h->done_implicit = 1; 2464 2465 /* Log new TXFC credit which was consumed. */ 2466 if (shdr->len > 0 && shdr->offset + shdr->len > fc_new_hwm) 2467 fc_new_hwm = shdr->offset + shdr->len; 2468 2469 /* Log chunk to TXPIM. */ 2470 chunk.stream_id = shdr->stream_id; 2471 chunk.start = shdr->offset; 2472 chunk.end = shdr->offset + shdr->len - 1; 2473 chunk.has_fin = shdr->is_fin; 2474 chunk.has_stop_sending = 0; 2475 chunk.has_reset_stream = 0; 2476 if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk)) 2477 goto err; /* alloc error */ 2478 2479 if (shdr->len < orig_len) { 2480 /* 2481 * If we did not serialize all of this chunk we definitely do not 2482 * want to try the next chunk 2483 */ 2484 rc = 1; 2485 goto err; 2486 } 2487 } 2488 2489 err: 2490 *new_credit_consumed = fc_new_hwm - fc_swm; 2491 return rc; 2492 } 2493 2494 static void txp_enlink_tmp(QUIC_STREAM **tmp_head, QUIC_STREAM *stream) 2495 { 2496 stream->txp_next = *tmp_head; 2497 *tmp_head = stream; 2498 } 2499 2500 static int txp_generate_stream_related(OSSL_QUIC_TX_PACKETISER *txp, 2501 struct txp_pkt *pkt, 2502 int *have_ack_eliciting, 2503 QUIC_STREAM **tmp_head) 2504 { 2505 QUIC_STREAM_ITER it; 2506 WPACKET *wpkt; 2507 uint64_t cwm; 2508 QUIC_STREAM *stream, *snext; 2509 struct tx_helper *h = &pkt->h; 2510 uint64_t conn_consumed = 0; 2511 2512 for (ossl_quic_stream_iter_init(&it, txp->args.qsm, 1); 2513 it.stream != NULL;) { 2514 2515 stream = it.stream; 2516 ossl_quic_stream_iter_next(&it); 2517 snext = it.stream; 2518 2519 stream->txp_sent_fc = 0; 2520 stream->txp_sent_stop_sending = 0; 2521 stream->txp_sent_reset_stream = 0; 2522 stream->txp_blocked = 0; 2523 stream->txp_txfc_new_credit_consumed = 0; 2524 2525 /* Stream Abort Frames (STOP_SENDING, RESET_STREAM) */ 2526 if (stream->want_stop_sending) { 2527 OSSL_QUIC_FRAME_STOP_SENDING f; 2528 2529 wpkt = tx_helper_begin(h); 2530 if (wpkt == NULL) 2531 return 0; /* alloc error */ 2532 2533 f.stream_id = stream->id; 2534 f.app_error_code = stream->stop_sending_aec; 2535 if (!ossl_quic_wire_encode_frame_stop_sending(wpkt, &f)) { 2536 tx_helper_rollback(h); /* can't fit */ 2537 txp_enlink_tmp(tmp_head, stream); 2538 break; 2539 } 2540 2541 if (!tx_helper_commit(h)) 2542 return 0; /* alloc error */ 2543 2544 *have_ack_eliciting = 1; 2545 tx_helper_unrestrict(h); /* no longer need PING */ 2546 stream->txp_sent_stop_sending = 1; 2547 } 2548 2549 if (stream->want_reset_stream) { 2550 OSSL_QUIC_FRAME_RESET_STREAM f; 2551 2552 if (!ossl_assert(stream->send_state == QUIC_SSTREAM_STATE_RESET_SENT)) 2553 return 0; 2554 2555 wpkt = tx_helper_begin(h); 2556 if (wpkt == NULL) 2557 return 0; /* alloc error */ 2558 2559 f.stream_id = stream->id; 2560 f.app_error_code = stream->reset_stream_aec; 2561 if (!ossl_quic_stream_send_get_final_size(stream, &f.final_size)) 2562 return 0; /* should not be possible */ 2563 2564 if (!ossl_quic_wire_encode_frame_reset_stream(wpkt, &f)) { 2565 tx_helper_rollback(h); /* can't fit */ 2566 txp_enlink_tmp(tmp_head, stream); 2567 break; 2568 } 2569 2570 if (!tx_helper_commit(h)) 2571 return 0; /* alloc error */ 2572 2573 *have_ack_eliciting = 1; 2574 tx_helper_unrestrict(h); /* no longer need PING */ 2575 stream->txp_sent_reset_stream = 1; 2576 2577 /* 2578 * The final size of the stream as indicated by RESET_STREAM is used 2579 * to ensure a consistent view of flow control state by both 2580 * parties; if we happen to send a RESET_STREAM that consumes more 2581 * flow control credit, make sure we account for that. 2582 */ 2583 if (!ossl_assert(f.final_size <= ossl_quic_txfc_get_swm(&stream->txfc))) 2584 return 0; 2585 2586 stream->txp_txfc_new_credit_consumed 2587 = f.final_size - ossl_quic_txfc_get_swm(&stream->txfc); 2588 } 2589 2590 /* 2591 * Stream Flow Control Frames (MAX_STREAM_DATA) 2592 * 2593 * RFC 9000 s. 13.3: "An endpoint SHOULD stop sending MAX_STREAM_DATA 2594 * frames when the receiving part of the stream enters a "Size Known" or 2595 * "Reset Recvd" state." -- In practice, RECV is the only state 2596 * in which it makes sense to generate more MAX_STREAM_DATA frames. 2597 */ 2598 if (stream->recv_state == QUIC_RSTREAM_STATE_RECV 2599 && (stream->want_max_stream_data 2600 || ossl_quic_rxfc_has_cwm_changed(&stream->rxfc, 0))) { 2601 2602 wpkt = tx_helper_begin(h); 2603 if (wpkt == NULL) 2604 return 0; /* alloc error */ 2605 2606 cwm = ossl_quic_rxfc_get_cwm(&stream->rxfc); 2607 2608 if (!ossl_quic_wire_encode_frame_max_stream_data(wpkt, stream->id, 2609 cwm)) { 2610 tx_helper_rollback(h); /* can't fit */ 2611 txp_enlink_tmp(tmp_head, stream); 2612 break; 2613 } 2614 2615 if (!tx_helper_commit(h)) 2616 return 0; /* alloc error */ 2617 2618 *have_ack_eliciting = 1; 2619 tx_helper_unrestrict(h); /* no longer need PING */ 2620 stream->txp_sent_fc = 1; 2621 } 2622 2623 /* 2624 * Stream Data Frames (STREAM) 2625 * 2626 * RFC 9000 s. 3.3: A sender MUST NOT send a STREAM [...] frame for a 2627 * stream in the "Reset Sent" state [or any terminal state]. We don't 2628 * send any more STREAM frames if we are sending, have sent, or are 2629 * planning to send, RESET_STREAM. The other terminal state is Data 2630 * Recvd, but txp_generate_stream_frames() is guaranteed to generate 2631 * nothing in this case. 2632 */ 2633 if (ossl_quic_stream_has_send_buffer(stream) 2634 && !ossl_quic_stream_send_is_reset(stream)) { 2635 int packet_full = 0; 2636 2637 if (!ossl_assert(!stream->want_reset_stream)) 2638 return 0; 2639 2640 if (!txp_generate_stream_frames(txp, pkt, 2641 stream->id, stream->sstream, 2642 &stream->txfc, 2643 snext, 2644 have_ack_eliciting, 2645 &packet_full, 2646 &stream->txp_txfc_new_credit_consumed, 2647 conn_consumed)) { 2648 /* Fatal error (allocation, etc.) */ 2649 txp_enlink_tmp(tmp_head, stream); 2650 return 0; 2651 } 2652 conn_consumed += stream->txp_txfc_new_credit_consumed; 2653 2654 if (packet_full) { 2655 txp_enlink_tmp(tmp_head, stream); 2656 break; 2657 } 2658 } 2659 2660 txp_enlink_tmp(tmp_head, stream); 2661 } 2662 2663 return 1; 2664 } 2665 2666 static int txp_generate_for_el(OSSL_QUIC_TX_PACKETISER *txp, 2667 struct txp_pkt *pkt, 2668 int chosen_for_conn_close) 2669 { 2670 int rc = TXP_ERR_SUCCESS; 2671 const uint32_t enc_level = pkt->h.enc_level; 2672 const uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level); 2673 int have_ack_eliciting = 0, done_pre_token = 0; 2674 const struct archetype_data a = pkt->geom.adata; 2675 /* 2676 * Cleared if we encode any non-ACK-eliciting frame type which rules out the 2677 * packet being a non-inflight frame. This means any non-ACK ACK-eliciting 2678 * frame, even PADDING frames. ACK eliciting frames always cause a packet to 2679 * become ineligible for non-inflight treatment so it is not necessary to 2680 * clear this in cases where have_ack_eliciting is set, as it is ignored in 2681 * that case. 2682 */ 2683 int can_be_non_inflight = 1; 2684 QUIC_CFQ_ITEM *cfq_item; 2685 QUIC_TXPIM_PKT *tpkt = NULL; 2686 struct tx_helper *h = &pkt->h; 2687 2688 /* Maximum PN reached? */ 2689 if (!ossl_quic_pn_valid(txp->next_pn[pn_space])) 2690 goto fatal_err; 2691 2692 if (!ossl_assert(pkt->tpkt == NULL)) 2693 goto fatal_err; 2694 2695 if ((pkt->tpkt = tpkt = ossl_quic_txpim_pkt_alloc(txp->args.txpim)) == NULL) 2696 goto fatal_err; 2697 2698 /* 2699 * Frame Serialization 2700 * =================== 2701 * 2702 * We now serialize frames into the packet in descending order of priority. 2703 */ 2704 2705 /* HANDSHAKE_DONE (Regenerate) */ 2706 if (a.allow_handshake_done && txp->want_handshake_done 2707 && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_HANDSHAKE_DONE) { 2708 WPACKET *wpkt = tx_helper_begin(h); 2709 2710 if (wpkt == NULL) 2711 goto fatal_err; 2712 2713 if (ossl_quic_wire_encode_frame_handshake_done(wpkt)) { 2714 tpkt->had_handshake_done_frame = 1; 2715 have_ack_eliciting = 1; 2716 2717 if (!tx_helper_commit(h)) 2718 goto fatal_err; 2719 2720 tx_helper_unrestrict(h); /* no longer need PING */ 2721 } else { 2722 tx_helper_rollback(h); 2723 } 2724 } 2725 2726 /* MAX_DATA (Regenerate) */ 2727 if (a.allow_conn_fc 2728 && (txp->want_max_data 2729 || ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 0)) 2730 && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_MAX_DATA) { 2731 WPACKET *wpkt = tx_helper_begin(h); 2732 uint64_t cwm = ossl_quic_rxfc_get_cwm(txp->args.conn_rxfc); 2733 2734 if (wpkt == NULL) 2735 goto fatal_err; 2736 2737 if (ossl_quic_wire_encode_frame_max_data(wpkt, cwm)) { 2738 tpkt->had_max_data_frame = 1; 2739 have_ack_eliciting = 1; 2740 2741 if (!tx_helper_commit(h)) 2742 goto fatal_err; 2743 2744 tx_helper_unrestrict(h); /* no longer need PING */ 2745 } else { 2746 tx_helper_rollback(h); 2747 } 2748 } 2749 2750 /* MAX_STREAMS_BIDI (Regenerate) */ 2751 if (a.allow_conn_fc 2752 && (txp->want_max_streams_bidi 2753 || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_bidi_rxfc, 0)) 2754 && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_MAX_STREAMS_BIDI) { 2755 WPACKET *wpkt = tx_helper_begin(h); 2756 uint64_t max_streams 2757 = ossl_quic_rxfc_get_cwm(txp->args.max_streams_bidi_rxfc); 2758 2759 if (wpkt == NULL) 2760 goto fatal_err; 2761 2762 if (ossl_quic_wire_encode_frame_max_streams(wpkt, /*is_uni=*/0, 2763 max_streams)) { 2764 tpkt->had_max_streams_bidi_frame = 1; 2765 have_ack_eliciting = 1; 2766 2767 if (!tx_helper_commit(h)) 2768 goto fatal_err; 2769 2770 tx_helper_unrestrict(h); /* no longer need PING */ 2771 } else { 2772 tx_helper_rollback(h); 2773 } 2774 } 2775 2776 /* MAX_STREAMS_UNI (Regenerate) */ 2777 if (a.allow_conn_fc 2778 && (txp->want_max_streams_uni 2779 || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_uni_rxfc, 0)) 2780 && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_MAX_STREAMS_UNI) { 2781 WPACKET *wpkt = tx_helper_begin(h); 2782 uint64_t max_streams 2783 = ossl_quic_rxfc_get_cwm(txp->args.max_streams_uni_rxfc); 2784 2785 if (wpkt == NULL) 2786 goto fatal_err; 2787 2788 if (ossl_quic_wire_encode_frame_max_streams(wpkt, /*is_uni=*/1, 2789 max_streams)) { 2790 tpkt->had_max_streams_uni_frame = 1; 2791 have_ack_eliciting = 1; 2792 2793 if (!tx_helper_commit(h)) 2794 goto fatal_err; 2795 2796 tx_helper_unrestrict(h); /* no longer need PING */ 2797 } else { 2798 tx_helper_rollback(h); 2799 } 2800 } 2801 2802 /* GCR Frames */ 2803 for (cfq_item = ossl_quic_cfq_get_priority_head(txp->args.cfq, pn_space); 2804 cfq_item != NULL; 2805 cfq_item = ossl_quic_cfq_item_get_priority_next(cfq_item, pn_space)) { 2806 uint64_t frame_type = ossl_quic_cfq_item_get_frame_type(cfq_item); 2807 const unsigned char *encoded = ossl_quic_cfq_item_get_encoded(cfq_item); 2808 size_t encoded_len = ossl_quic_cfq_item_get_encoded_len(cfq_item); 2809 2810 switch (frame_type) { 2811 case OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID: 2812 if (!a.allow_new_conn_id) 2813 continue; 2814 break; 2815 case OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID: 2816 if (!a.allow_retire_conn_id) 2817 continue; 2818 break; 2819 case OSSL_QUIC_FRAME_TYPE_NEW_TOKEN: 2820 if (!a.allow_new_token) 2821 continue; 2822 2823 /* 2824 * NEW_TOKEN frames are handled via GCR, but some 2825 * Regenerate-strategy frames should come before them (namely 2826 * ACK, CONNECTION_CLOSE, PATH_CHALLENGE and PATH_RESPONSE). If 2827 * we find a NEW_TOKEN frame, do these now. If there are no 2828 * NEW_TOKEN frames in the GCR queue we will handle these below. 2829 */ 2830 if (!done_pre_token) 2831 if (txp_generate_pre_token(txp, pkt, 2832 chosen_for_conn_close, 2833 &can_be_non_inflight)) 2834 done_pre_token = 1; 2835 2836 break; 2837 case OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE: 2838 if (!a.allow_path_response) 2839 continue; 2840 2841 /* 2842 * RFC 9000 s. 8.2.2: An endpoint MUST expand datagrams that 2843 * contain a PATH_RESPONSE frame to at least the smallest 2844 * allowed maximum datagram size of 1200 bytes. 2845 */ 2846 pkt->force_pad = 1; 2847 break; 2848 default: 2849 if (!a.allow_cfq_other) 2850 continue; 2851 break; 2852 } 2853 2854 /* 2855 * If the frame is too big, don't try to schedule any more GCR frames in 2856 * this packet rather than sending subsequent ones out of order. 2857 */ 2858 if (encoded_len > tx_helper_get_space_left(h)) 2859 break; 2860 2861 if (!tx_helper_append_iovec(h, encoded, encoded_len)) 2862 goto fatal_err; 2863 2864 ossl_quic_txpim_pkt_add_cfq_item(tpkt, cfq_item); 2865 2866 if (ossl_quic_frame_type_is_ack_eliciting(frame_type)) { 2867 have_ack_eliciting = 1; 2868 tx_helper_unrestrict(h); /* no longer need PING */ 2869 } 2870 } 2871 2872 /* 2873 * If we didn't generate ACK, CONNECTION_CLOSE, PATH_CHALLENGE or 2874 * PATH_RESPONSE (as desired) before, do so now. 2875 */ 2876 if (!done_pre_token) 2877 if (txp_generate_pre_token(txp, pkt, 2878 chosen_for_conn_close, 2879 &can_be_non_inflight)) 2880 done_pre_token = 1; 2881 2882 /* CRYPTO Frames */ 2883 if (a.allow_crypto) 2884 if (!txp_generate_crypto_frames(txp, pkt, &have_ack_eliciting)) 2885 goto fatal_err; 2886 2887 /* Stream-specific frames */ 2888 if (a.allow_stream_rel && txp->handshake_complete) 2889 if (!txp_generate_stream_related(txp, pkt, 2890 &have_ack_eliciting, 2891 &pkt->stream_head)) 2892 goto fatal_err; 2893 2894 /* PING */ 2895 tx_helper_unrestrict(h); 2896 2897 if (!have_ack_eliciting && txp_need_ping(txp, pn_space, &a)) { 2898 WPACKET *wpkt; 2899 2900 assert(h->reserve > 0); 2901 wpkt = tx_helper_begin(h); 2902 if (wpkt == NULL) 2903 goto fatal_err; 2904 2905 if (!ossl_quic_wire_encode_frame_ping(wpkt) 2906 || !tx_helper_commit(h)) 2907 /* 2908 * We treat a request to be ACK-eliciting as a requirement, so this 2909 * is an error. 2910 */ 2911 goto fatal_err; 2912 2913 have_ack_eliciting = 1; 2914 } 2915 2916 /* PADDING is added by ossl_quic_tx_packetiser_generate(). */ 2917 2918 /* 2919 * ACKM Data 2920 * ========= 2921 */ 2922 if (have_ack_eliciting) 2923 can_be_non_inflight = 0; 2924 2925 /* ACKM Data */ 2926 tpkt->ackm_pkt.num_bytes = h->bytes_appended + pkt->geom.pkt_overhead; 2927 tpkt->ackm_pkt.pkt_num = txp->next_pn[pn_space]; 2928 /* largest_acked is set in txp_generate_pre_token */ 2929 tpkt->ackm_pkt.pkt_space = pn_space; 2930 tpkt->ackm_pkt.is_inflight = !can_be_non_inflight; 2931 tpkt->ackm_pkt.is_ack_eliciting = have_ack_eliciting; 2932 tpkt->ackm_pkt.is_pto_probe = 0; 2933 tpkt->ackm_pkt.is_mtu_probe = 0; 2934 tpkt->ackm_pkt.time = txp->args.now(txp->args.now_arg); 2935 tpkt->pkt_type = pkt->phdr.type; 2936 2937 /* Done. */ 2938 return rc; 2939 2940 fatal_err: 2941 /* 2942 * Handler for fatal errors, i.e. errors causing us to abort the entire 2943 * packet rather than just one frame. Examples of such errors include 2944 * allocation errors. 2945 */ 2946 if (tpkt != NULL) { 2947 ossl_quic_txpim_pkt_release(txp->args.txpim, tpkt); 2948 pkt->tpkt = NULL; 2949 } 2950 return TXP_ERR_INTERNAL; 2951 } 2952 2953 /* 2954 * Commits and queues a packet for transmission. There is no backing out after 2955 * this. 2956 * 2957 * This: 2958 * 2959 * - Sends the packet to the QTX for encryption and transmission; 2960 * 2961 * - Records the packet as having been transmitted in FIFM. ACKM is informed, 2962 * etc. and the TXPIM record is filed. 2963 * 2964 * - Informs various subsystems of frames that were sent and clears frame 2965 * wanted flags so that we do not generate the same frames again. 2966 * 2967 * Assumptions: 2968 * 2969 * - pkt is a txp_pkt for the correct EL; 2970 * 2971 * - pkt->tpkt is valid; 2972 * 2973 * - pkt->tpkt->ackm_pkt has been fully filled in; 2974 * 2975 * - Stream chunk records have been appended to pkt->tpkt for STREAM and 2976 * CRYPTO frames, but not for RESET_STREAM or STOP_SENDING frames; 2977 * 2978 * - The chosen stream list for the packet can be fully walked from 2979 * pkt->stream_head using stream->txp_next; 2980 * 2981 * - pkt->has_ack_eliciting is set correctly. 2982 * 2983 */ 2984 static int txp_pkt_commit(OSSL_QUIC_TX_PACKETISER *txp, 2985 struct txp_pkt *pkt, 2986 uint32_t archetype, 2987 int *txpim_pkt_reffed) 2988 { 2989 int rc = 1; 2990 uint32_t enc_level = pkt->h.enc_level; 2991 uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level); 2992 QUIC_TXPIM_PKT *tpkt = pkt->tpkt; 2993 QUIC_STREAM *stream; 2994 OSSL_QTX_PKT txpkt; 2995 struct archetype_data a; 2996 2997 *txpim_pkt_reffed = 0; 2998 2999 /* Cannot send a packet with an empty payload. */ 3000 if (pkt->h.bytes_appended == 0) 3001 return 0; 3002 3003 if (!txp_get_archetype_data(enc_level, archetype, &a)) 3004 return 0; 3005 3006 /* Packet Information for QTX */ 3007 txpkt.hdr = &pkt->phdr; 3008 txpkt.iovec = txp->el[enc_level].iovec; 3009 txpkt.num_iovec = pkt->h.num_iovec; 3010 txpkt.local = NULL; 3011 txpkt.peer = BIO_ADDR_family(&txp->args.peer) == AF_UNSPEC 3012 ? NULL : &txp->args.peer; 3013 txpkt.pn = txp->next_pn[pn_space]; 3014 txpkt.flags = OSSL_QTX_PKT_FLAG_COALESCE; /* always try to coalesce */ 3015 3016 /* Generate TXPIM chunks representing STOP_SENDING and RESET_STREAM frames. */ 3017 for (stream = pkt->stream_head; stream != NULL; stream = stream->txp_next) 3018 if (stream->txp_sent_stop_sending || stream->txp_sent_reset_stream) { 3019 /* Log STOP_SENDING/RESET_STREAM chunk to TXPIM. */ 3020 QUIC_TXPIM_CHUNK chunk; 3021 3022 chunk.stream_id = stream->id; 3023 chunk.start = UINT64_MAX; 3024 chunk.end = 0; 3025 chunk.has_fin = 0; 3026 chunk.has_stop_sending = stream->txp_sent_stop_sending; 3027 chunk.has_reset_stream = stream->txp_sent_reset_stream; 3028 if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk)) 3029 return 0; /* alloc error */ 3030 } 3031 3032 /* Dispatch to FIFD. */ 3033 if (!ossl_quic_fifd_pkt_commit(&txp->fifd, tpkt)) 3034 return 0; 3035 3036 /* 3037 * Transmission and Post-Packet Generation Bookkeeping 3038 * =================================================== 3039 * 3040 * No backing out anymore - at this point the ACKM has recorded the packet 3041 * as having been sent, so we need to increment our next PN counter, or 3042 * the ACKM will complain when we try to record a duplicate packet with 3043 * the same PN later. At this point actually sending the packet may still 3044 * fail. In this unlikely event it will simply be handled as though it 3045 * were a lost packet. 3046 */ 3047 ++txp->next_pn[pn_space]; 3048 *txpim_pkt_reffed = 1; 3049 3050 /* Send the packet. */ 3051 if (!ossl_qtx_write_pkt(txp->args.qtx, &txpkt)) 3052 return 0; 3053 3054 /* 3055 * Record FC and stream abort frames as sent; deactivate streams which no 3056 * longer have anything to do. 3057 */ 3058 for (stream = pkt->stream_head; stream != NULL; stream = stream->txp_next) { 3059 if (stream->txp_sent_fc) { 3060 stream->want_max_stream_data = 0; 3061 ossl_quic_rxfc_has_cwm_changed(&stream->rxfc, 1); 3062 } 3063 3064 if (stream->txp_sent_stop_sending) 3065 stream->want_stop_sending = 0; 3066 3067 if (stream->txp_sent_reset_stream) 3068 stream->want_reset_stream = 0; 3069 3070 if (stream->txp_txfc_new_credit_consumed > 0) { 3071 if (!ossl_assert(ossl_quic_txfc_consume_credit(&stream->txfc, 3072 stream->txp_txfc_new_credit_consumed))) 3073 /* 3074 * Should not be possible, but we should continue with our 3075 * bookkeeping as we have already committed the packet to the 3076 * FIFD. Just change the value we return. 3077 */ 3078 rc = 0; 3079 3080 stream->txp_txfc_new_credit_consumed = 0; 3081 } 3082 3083 /* 3084 * If we no longer need to generate any flow control (MAX_STREAM_DATA), 3085 * STOP_SENDING or RESET_STREAM frames, nor any STREAM frames (because 3086 * the stream is drained of data or TXFC-blocked), we can mark the 3087 * stream as inactive. 3088 */ 3089 ossl_quic_stream_map_update_state(txp->args.qsm, stream); 3090 3091 if (ossl_quic_stream_has_send_buffer(stream) 3092 && !ossl_quic_sstream_has_pending(stream->sstream) 3093 && ossl_quic_sstream_get_final_size(stream->sstream, NULL)) 3094 /* 3095 * Transition to DATA_SENT if stream has a final size and we have 3096 * sent all data. 3097 */ 3098 ossl_quic_stream_map_notify_all_data_sent(txp->args.qsm, stream); 3099 } 3100 3101 /* We have now sent the packet, so update state accordingly. */ 3102 if (tpkt->ackm_pkt.is_ack_eliciting) 3103 txp->force_ack_eliciting &= ~(1UL << pn_space); 3104 3105 if (tpkt->had_handshake_done_frame) 3106 txp->want_handshake_done = 0; 3107 3108 if (tpkt->had_max_data_frame) { 3109 txp->want_max_data = 0; 3110 ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 1); 3111 } 3112 3113 if (tpkt->had_max_streams_bidi_frame) { 3114 txp->want_max_streams_bidi = 0; 3115 ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_bidi_rxfc, 1); 3116 } 3117 3118 if (tpkt->had_max_streams_uni_frame) { 3119 txp->want_max_streams_uni = 0; 3120 ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_uni_rxfc, 1); 3121 } 3122 3123 if (tpkt->had_ack_frame) 3124 txp->want_ack &= ~(1UL << pn_space); 3125 3126 if (tpkt->had_conn_close) 3127 txp->want_conn_close = 0; 3128 3129 /* 3130 * Decrement probe request counts if we have sent a packet that meets 3131 * the requirement of a probe, namely being ACK-eliciting. 3132 */ 3133 if (tpkt->ackm_pkt.is_ack_eliciting) { 3134 OSSL_ACKM_PROBE_INFO *probe_info 3135 = ossl_ackm_get0_probe_request(txp->args.ackm); 3136 3137 if (enc_level == QUIC_ENC_LEVEL_INITIAL 3138 && probe_info->anti_deadlock_initial > 0) 3139 --probe_info->anti_deadlock_initial; 3140 3141 if (enc_level == QUIC_ENC_LEVEL_HANDSHAKE 3142 && probe_info->anti_deadlock_handshake > 0) 3143 --probe_info->anti_deadlock_handshake; 3144 3145 if (a.allow_force_ack_eliciting /* (i.e., not for 0-RTT) */ 3146 && probe_info->pto[pn_space] > 0) 3147 --probe_info->pto[pn_space]; 3148 } 3149 3150 return rc; 3151 } 3152 3153 /* Ensure the iovec array is at least num elements long. */ 3154 static int txp_el_ensure_iovec(struct txp_el *el, size_t num) 3155 { 3156 OSSL_QTX_IOVEC *iovec; 3157 3158 if (el->alloc_iovec >= num) 3159 return 1; 3160 3161 num = el->alloc_iovec != 0 ? el->alloc_iovec * 2 : 8; 3162 3163 iovec = OPENSSL_realloc(el->iovec, sizeof(OSSL_QTX_IOVEC) * num); 3164 if (iovec == NULL) 3165 return 0; 3166 3167 el->iovec = iovec; 3168 el->alloc_iovec = num; 3169 return 1; 3170 } 3171 3172 int ossl_quic_tx_packetiser_schedule_conn_close(OSSL_QUIC_TX_PACKETISER *txp, 3173 const OSSL_QUIC_FRAME_CONN_CLOSE *f) 3174 { 3175 char *reason = NULL; 3176 size_t reason_len = f->reason_len; 3177 size_t max_reason_len = txp_get_mdpl(txp) / 2; 3178 3179 if (txp->want_conn_close) 3180 return 0; 3181 3182 /* 3183 * Arbitrarily limit the length of the reason length string to half of the 3184 * MDPL. 3185 */ 3186 if (reason_len > max_reason_len) 3187 reason_len = max_reason_len; 3188 3189 if (reason_len > 0) { 3190 reason = OPENSSL_memdup(f->reason, reason_len); 3191 if (reason == NULL) 3192 return 0; 3193 } 3194 3195 txp->conn_close_frame = *f; 3196 txp->conn_close_frame.reason = reason; 3197 txp->conn_close_frame.reason_len = reason_len; 3198 txp->want_conn_close = 1; 3199 return 1; 3200 } 3201 3202 void ossl_quic_tx_packetiser_set_msg_callback(OSSL_QUIC_TX_PACKETISER *txp, 3203 ossl_msg_cb msg_callback, 3204 SSL *msg_callback_ssl) 3205 { 3206 txp->msg_callback = msg_callback; 3207 txp->msg_callback_ssl = msg_callback_ssl; 3208 } 3209 3210 void ossl_quic_tx_packetiser_set_msg_callback_arg(OSSL_QUIC_TX_PACKETISER *txp, 3211 void *msg_callback_arg) 3212 { 3213 txp->msg_callback_arg = msg_callback_arg; 3214 } 3215 3216 QUIC_PN ossl_quic_tx_packetiser_get_next_pn(OSSL_QUIC_TX_PACKETISER *txp, 3217 uint32_t pn_space) 3218 { 3219 if (pn_space >= QUIC_PN_SPACE_NUM) 3220 return UINT64_MAX; 3221 3222 return txp->next_pn[pn_space]; 3223 } 3224 3225 OSSL_TIME ossl_quic_tx_packetiser_get_deadline(OSSL_QUIC_TX_PACKETISER *txp) 3226 { 3227 /* 3228 * TXP-specific deadline computations which rely on TXP innards. This is in 3229 * turn relied on by the QUIC_CHANNEL code to determine the channel event 3230 * handling deadline. 3231 */ 3232 OSSL_TIME deadline = ossl_time_infinite(); 3233 uint32_t enc_level, pn_space; 3234 3235 /* 3236 * ACK generation is not CC-gated - packets containing only ACKs are allowed 3237 * to bypass CC. We want to generate ACK frames even if we are currently 3238 * restricted by CC so the peer knows we have received data. The generate 3239 * call will take care of selecting the correct packet archetype. 3240 */ 3241 for (enc_level = QUIC_ENC_LEVEL_INITIAL; 3242 enc_level < QUIC_ENC_LEVEL_NUM; 3243 ++enc_level) 3244 if (ossl_qtx_is_enc_level_provisioned(txp->args.qtx, enc_level)) { 3245 pn_space = ossl_quic_enc_level_to_pn_space(enc_level); 3246 deadline = ossl_time_min(deadline, 3247 ossl_ackm_get_ack_deadline(txp->args.ackm, pn_space)); 3248 } 3249 3250 /* When will CC let us send more? */ 3251 if (txp->args.cc_method->get_tx_allowance(txp->args.cc_data) == 0) 3252 deadline = ossl_time_min(deadline, 3253 txp->args.cc_method->get_wakeup_deadline(txp->args.cc_data)); 3254 3255 return deadline; 3256 } 3257