xref: /freebsd/crypto/openssl/ssl/quic/quic_record_rx.c (revision 88b8b7f0c4e9948667a2279e78e975a784049cba)
1 /*
2  * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
3  *
4  * Licensed under the Apache License 2.0 (the "License").  You may not use
5  * this file except in compliance with the License.  You can obtain a copy
6  * in the file LICENSE in the source distribution or at
7  * https://www.openssl.org/source/license.html
8  */
9 
10 #include <openssl/ssl.h>
11 #include "internal/quic_record_rx.h"
12 #include "quic_record_shared.h"
13 #include "internal/common.h"
14 #include "internal/list.h"
15 #include "../ssl_local.h"
16 
17 /*
18  * Mark a packet in a bitfield.
19  *
20  * pkt_idx: index of packet within datagram.
21  */
22 static ossl_inline void pkt_mark(uint64_t *bitf, size_t pkt_idx)
23 {
24     assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
25     *bitf |= ((uint64_t)1) << pkt_idx;
26 }
27 
28 /* Returns 1 if a packet is in the bitfield. */
29 static ossl_inline int pkt_is_marked(const uint64_t *bitf, size_t pkt_idx)
30 {
31     assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
32     return (*bitf & (((uint64_t)1) << pkt_idx)) != 0;
33 }
34 
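/*
 * These helpers operate on the uint64_t 'processed' and 'hpr_removed'
 * bitfields kept in the URXE; for example, pkt_mark(&urxe->processed, 2)
 * records that the third packet in a datagram has been handled.
 */
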
35 /*
36  * RXE
37  * ===
38  *
39  * RX Entries (RXEs) store processed (i.e., decrypted) data received from the
40  * network. One RXE is used per received QUIC packet.
41  */
42 typedef struct rxe_st RXE;
43 
44 struct rxe_st {
45     OSSL_QRX_PKT        pkt;
46     OSSL_LIST_MEMBER(rxe, RXE);
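    /*
     * data_len:  number of valid payload bytes currently stored after this
     *            structure
     * alloc_len: size in bytes of the inline buffer following this structure
     * refcount:  nonzero while the RXE is handed out to the user
     */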
47     size_t              data_len, alloc_len, refcount;
48 
49     /* Extra fields for per-packet information. */
50     QUIC_PKT_HDR        hdr; /* data/len are decrypted payload */
51 
52     /* Decoded packet number. */
53     QUIC_PN             pn;
54 
55     /* Addresses copied from URXE. */
56     BIO_ADDR            peer, local;
57 
58     /* Time we received the packet (not when we processed it). */
59     OSSL_TIME           time;
60 
61     /* Total length of the datagram which contained this packet. */
62     size_t              datagram_len;
63 
64     /*
65      * The key epoch the packet was received with. Always 0 for non-1-RTT
66      * packets.
67      */
68     uint64_t            key_epoch;
69 
70     /*
71      * Monotonically increases with each datagram received.
72      * For diagnostic use only.
73      */
74     uint64_t            datagram_id;
75 
76     /*
77      * alloc_len allocated bytes (of which data_len bytes are valid) follow this
78      * structure.
79      */
80 };
81 
82 DEFINE_LIST_OF(rxe, RXE);
83 typedef OSSL_LIST(rxe) RXE_LIST;
84 
85 static ossl_inline unsigned char *rxe_data(const RXE *e)
86 {
87     return (unsigned char *)(e + 1);
88 }
89 
90 /*
91  * QRL
92  * ===
93  */
94 struct ossl_qrx_st {
95     OSSL_LIB_CTX               *libctx;
96     const char                 *propq;
97 
98     /* Demux to receive datagrams from. */
99     QUIC_DEMUX                 *demux;
100 
101     /* Length of connection IDs used in short-header packets in bytes. */
102     size_t                      short_conn_id_len;
103 
104     /* Maximum number of deferred datagrams buffered at any one time. */
105     size_t                      max_deferred;
106 
107     /* Current count of deferred datagrams. */
108     size_t                      num_deferred;
109 
110     /*
111      * List of URXEs which are filled with received encrypted data.
112      * These are returned to the DEMUX's free list as they are processed.
113      */
114     QUIC_URXE_LIST              urx_pending;
115 
116     /*
117      * List of URXEs which we could not decrypt immediately and which are being
118      * kept in case they can be decrypted later.
119      */
120     QUIC_URXE_LIST              urx_deferred;
121 
122     /*
123      * List of RXEs which are not currently in use. These are moved
124      * to the pending list as they are filled.
125      */
126     RXE_LIST                    rx_free;
127 
128     /*
129      * List of RXEs which are filled with decrypted packets ready to be passed
130      * to the user. An RXE is removed from all lists inside the QRL when passed
131      * to the user, then returned to the free list when the user returns it.
132      */
133     RXE_LIST                    rx_pending;
134 
135     /* Largest PN we have received and processed in a given PN space. */
136     QUIC_PN                     largest_pn[QUIC_PN_SPACE_NUM];
137 
138     /* Per encryption-level state. */
139     OSSL_QRL_ENC_LEVEL_SET      el_set;
140 
141     /* Bytes we have received since this counter was last cleared. */
142     uint64_t                    bytes_received;
143 
144     /*
145      * Number of forged packets we have received since the QRX was instantiated.
146      * Note that as per RFC 9001, this is connection-level state; it is not per
147      * EL and is not reset by a key update.
148      */
149     uint64_t                    forged_pkt_count;
150 
151     /*
152      * The PN the current key epoch started at, inclusive.
153      */
154     uint64_t                    cur_epoch_start_pn;
155 
156     /* Validation callback. */
157     ossl_qrx_late_validation_cb    *validation_cb;
158     void                           *validation_cb_arg;
159 
160     /* Key update callback. */
161     ossl_qrx_key_update_cb         *key_update_cb;
162     void                           *key_update_cb_arg;
163 
164     /* Initial key phase. For debugging use only; always 0 in real use. */
165     unsigned char                   init_key_phase_bit;
166 
167     /* Are we allowed to process 1-RTT packets yet? */
168     unsigned char                   allow_1rtt;
169 
170     /* Message callback related arguments */
171     ossl_msg_cb msg_callback;
172     void *msg_callback_arg;
173     SSL *msg_callback_ssl;
174 };
175 
176 static RXE *qrx_ensure_free_rxe(OSSL_QRX *qrx, size_t alloc_len);
177 static int qrx_validate_hdr_early(OSSL_QRX *qrx, RXE *rxe,
178                                   const QUIC_CONN_ID *first_dcid);
179 static int qrx_relocate_buffer(OSSL_QRX *qrx, RXE **prxe, size_t *pi,
180                                const unsigned char **pptr, size_t buf_len);
181 static int qrx_validate_hdr(OSSL_QRX *qrx, RXE *rxe);
182 static RXE *qrx_reserve_rxe(RXE_LIST *rxl, RXE *rxe, size_t n);
183 static int qrx_decrypt_pkt_body(OSSL_QRX *qrx, unsigned char *dst,
184                                 const unsigned char *src,
185                                 size_t src_len, size_t *dec_len,
186                                 const unsigned char *aad, size_t aad_len,
187                                 QUIC_PN pn, uint32_t enc_level,
188                                 unsigned char key_phase_bit,
189                                 uint64_t *rx_key_epoch);
190 static int qrx_validate_hdr_late(OSSL_QRX *qrx, RXE *rxe);
191 static uint32_t rxe_determine_pn_space(RXE *rxe);
192 static void ignore_res(int x);
193 
194 OSSL_QRX *ossl_qrx_new(const OSSL_QRX_ARGS *args)
195 {
196     OSSL_QRX *qrx;
197     size_t i;
198 
199     if (args->demux == NULL || args->max_deferred == 0)
200         return NULL;
201 
202     qrx = OPENSSL_zalloc(sizeof(OSSL_QRX));
203     if (qrx == NULL)
204         return NULL;
205 
206     for (i = 0; i < OSSL_NELEM(qrx->largest_pn); ++i)
207         qrx->largest_pn[i] = args->init_largest_pn[i];
208 
209     qrx->libctx                 = args->libctx;
210     qrx->propq                  = args->propq;
211     qrx->demux                  = args->demux;
212     qrx->short_conn_id_len      = args->short_conn_id_len;
213     qrx->init_key_phase_bit     = args->init_key_phase_bit;
214     qrx->max_deferred           = args->max_deferred;
215     return qrx;
216 }
217 
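/*
 * Example construction (sketch): ossl_qrx_new() requires a demux and a
 * non-zero max_deferred; the remaining OSSL_QRX_ARGS fields are copied
 * verbatim into the new QRX.
 *
 *   OSSL_QRX_ARGS args = {0};
 *
 *   args.demux        = demux;
 *   args.max_deferred = 32;
 *   qrx = ossl_qrx_new(&args);
 */
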
218 static void qrx_cleanup_rxl(RXE_LIST *l)
219 {
220     RXE *e, *enext;
221 
222     for (e = ossl_list_rxe_head(l); e != NULL; e = enext) {
223         enext = ossl_list_rxe_next(e);
224         ossl_list_rxe_remove(l, e);
225         OPENSSL_free(e);
226     }
227 }
228 
229 static void qrx_cleanup_urxl(OSSL_QRX *qrx, QUIC_URXE_LIST *l)
230 {
231     QUIC_URXE *e, *enext;
232 
233     for (e = ossl_list_urxe_head(l); e != NULL; e = enext) {
234         enext = ossl_list_urxe_next(e);
235         ossl_list_urxe_remove(l, e);
236         ossl_quic_demux_release_urxe(qrx->demux, e);
237     }
238 }
239 
240 void ossl_qrx_update_pn_space(OSSL_QRX *src, OSSL_QRX *dst)
241 {
242     size_t i;
243 
244     for (i = 0; i < QUIC_PN_SPACE_NUM; i++)
245         dst->largest_pn[i] = src->largest_pn[i];
246 
247     return;
248 }
249 
250 void ossl_qrx_free(OSSL_QRX *qrx)
251 {
252     uint32_t i;
253 
254     if (qrx == NULL)
255         return;
256 
257     /* Free RXE queue data. */
258     qrx_cleanup_rxl(&qrx->rx_free);
259     qrx_cleanup_rxl(&qrx->rx_pending);
260     qrx_cleanup_urxl(qrx, &qrx->urx_pending);
261     qrx_cleanup_urxl(qrx, &qrx->urx_deferred);
262 
263     /* Drop keying material and crypto resources. */
264     for (i = 0; i < QUIC_ENC_LEVEL_NUM; ++i)
265         ossl_qrl_enc_level_set_discard(&qrx->el_set, i);
266 
267     OPENSSL_free(qrx);
268 }
269 
270 void ossl_qrx_inject_urxe(OSSL_QRX *qrx, QUIC_URXE *urxe)
271 {
272     /* Initialize our own fields inside the URXE and add to the pending list. */
273     urxe->processed     = 0;
274     urxe->hpr_removed   = 0;
275     urxe->deferred      = 0;
276     ossl_list_urxe_insert_tail(&qrx->urx_pending, urxe);
277 
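    /*
     * Note: the received datagram payload is stored inline immediately after
     * the URXE header, hence the use of urxe + 1 below.
     */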
278     if (qrx->msg_callback != NULL)
279         qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_DATAGRAM, urxe + 1,
280                           urxe->data_len, qrx->msg_callback_ssl,
281                           qrx->msg_callback_arg);
282 }
283 
284 void ossl_qrx_inject_pkt(OSSL_QRX *qrx, OSSL_QRX_PKT *pkt)
285 {
286     RXE *rxe = (RXE *)pkt;
287 
288     /*
289      * port_default_packet_handler() obtains pkt via ossl_qrx_read_pkt(),
290      * so the packet arrives here with a refcount of 1.
291      */
292     ossl_qrx_pkt_orphan(pkt);
293     if (ossl_assert(rxe->refcount == 0))
294         ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
295 }
296 
297 /*
298  * qrx_validate_initial_pkt() is derived from qrx_process_pkt(). Unlike
299  * qrx_process_pkt(), the qrx_validate_initial_pkt() function can process
300  * Initial packets only; all other packets are discarded. This allows
301  * port_default_packet_handler() to validate an incoming packet. If the
302  * packet is not valid, then port_default_packet_handler() must discard it
303  * instead of creating a new channel for it.
304  */
305 static int qrx_validate_initial_pkt(OSSL_QRX *qrx, QUIC_URXE *urxe,
306                                     const QUIC_CONN_ID *first_dcid,
307                                     size_t datagram_len)
308 {
309     PACKET pkt, orig_pkt;
310     RXE *rxe;
311     size_t i = 0, aad_len = 0, dec_len = 0;
312     const unsigned char *sop;
313     unsigned char *dst;
314     QUIC_PKT_HDR_PTRS ptrs;
315     uint32_t pn_space;
316     OSSL_QRL_ENC_LEVEL *el = NULL;
317     uint64_t rx_key_epoch = UINT64_MAX;
318 
319     if (!PACKET_buf_init(&pkt, ossl_quic_urxe_data(urxe), urxe->data_len))
320         return 0;
321 
322     orig_pkt = pkt;
323     sop = PACKET_data(&pkt);
324 
325     /*
326      * Get a free RXE. If we need to allocate a new one, use the packet length
327      * as a good ballpark figure.
328      */
329     rxe = qrx_ensure_free_rxe(qrx, PACKET_remaining(&pkt));
330     if (rxe == NULL)
331         return 0;
332 
333     /*
334      * we expect an INITIAL packet only, so it is OK to pass
335      * short_conn_id_len as 0.
336      */
337     if (!ossl_quic_wire_decode_pkt_hdr(&pkt,
338                                        0, /* short_conn_id_len */
339                                        1, /* need second decode */
340                                        0, /* nodata -> want to read data */
341                                        &rxe->hdr, &ptrs,
342                                        NULL))
343         goto malformed;
344 
345     if (rxe->hdr.type != QUIC_PKT_TYPE_INITIAL)
346         goto malformed;
347 
348     if (!qrx_validate_hdr_early(qrx, rxe, NULL))
349         goto malformed;
350 
351     if (ossl_qrl_enc_level_set_have_el(&qrx->el_set, QUIC_ENC_LEVEL_INITIAL) != 1)
352         goto malformed;
353 
354     if (rxe->hdr.type == QUIC_PKT_TYPE_INITIAL) {
355         const unsigned char *token = rxe->hdr.token;
356 
357         /*
358          * This may change the value of rxe and change the value of the token
359          * pointer as well. So we must make a temporary copy of the pointer to
360          * the token, and then copy it back into the new location of the RXE.
361          */
362         if (!qrx_relocate_buffer(qrx, &rxe, &i, &token, rxe->hdr.token_len))
363             goto malformed;
364 
365         rxe->hdr.token = token;
366     }
367 
368     pkt = orig_pkt;
369 
370     el = ossl_qrl_enc_level_set_get(&qrx->el_set, QUIC_ENC_LEVEL_INITIAL, 1);
371     assert(el != NULL); /* Already checked above */
372 
373     if (!ossl_quic_hdr_protector_decrypt(&el->hpr, &ptrs))
374         goto malformed;
375 
376     /*
377      * We have removed header protection, so don't attempt to do it again if
378      * the packet gets deferred and processed again.
379      */
380     pkt_mark(&urxe->hpr_removed, 0);
381 
382     /* Decode the now unprotected header. */
383     if (ossl_quic_wire_decode_pkt_hdr(&pkt, 0,
384                                       0, 0, &rxe->hdr, NULL, NULL) != 1)
385         goto malformed;
386 
387     /* Validate header and decode PN. */
388     if (!qrx_validate_hdr(qrx, rxe))
389         goto malformed;
390 
391     /*
392      * The AAD data is the entire (unprotected) packet header including the PN.
393      * The packet header has been unprotected in place, so we can just reuse the
394      * PACKET buffer. The header ends where the payload begins.
395      */
396     aad_len = rxe->hdr.data - sop;
397 
398     /* Ensure the RXE buffer size is adequate for our payload. */
399     if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len + i)) == NULL)
400         goto malformed;
401 
402     /*
403      * We decrypt the packet body to immediately after the token at the start of
404      * the RXE buffer (where present).
405      *
406      * Do the decryption from the PACKET (which points into URXE memory) to our
407      * RXE payload (single-copy decryption), then fixup the pointers in the
408      * header to point to our new buffer.
409      *
410      * If decryption fails this is considered a permanent error; we defer
411      * packets we don't yet have decryption keys for above, so if this fails,
412      * something has gone wrong with the handshake process or a packet has been
413      * corrupted.
414      */
415     dst = (unsigned char *)rxe_data(rxe) + i;
416     if (!qrx_decrypt_pkt_body(qrx, dst, rxe->hdr.data, rxe->hdr.len,
417                               &dec_len, sop, aad_len, rxe->pn, QUIC_ENC_LEVEL_INITIAL,
418                               rxe->hdr.key_phase, &rx_key_epoch))
419         goto malformed;
420 
421     /*
422      * -----------------------------------------------------
423      *   IMPORTANT: ANYTHING ABOVE THIS LINE IS UNVERIFIED
424      *              AND MUST BE TIMING-CHANNEL SAFE.
425      * -----------------------------------------------------
426      *
427      * At this point, we have successfully authenticated the AEAD tag and no
428      * longer need to worry about exposing the PN, PN length or Key Phase bit in
429      * timing channels. Invoke any configured validation callback to allow for
430      * rejection of duplicate PNs.
431      */
432     if (!qrx_validate_hdr_late(qrx, rxe))
433         goto malformed;
434 
435     pkt_mark(&urxe->processed, 0);
436 
437     /*
438      * Update header to point to the decrypted buffer, which may be shorter
439      * due to AEAD tags, block padding, etc.
440      */
441     rxe->hdr.data       = dst;
442     rxe->hdr.len        = dec_len;
443     rxe->data_len       = dec_len;
444     rxe->datagram_len   = datagram_len;
445     rxe->key_epoch      = rx_key_epoch;
446 
447     /* We processed the PN successfully, so update largest processed PN. */
448     pn_space = rxe_determine_pn_space(rxe);
449     if (rxe->pn > qrx->largest_pn[pn_space])
450         qrx->largest_pn[pn_space] = rxe->pn;
451 
452     /* Copy across network addresses and RX time from URXE to RXE. */
453     rxe->peer           = urxe->peer;
454     rxe->local          = urxe->local;
455     rxe->time           = urxe->time;
456     rxe->datagram_id    = urxe->datagram_id;
457 
458     /*
459      * The packet is decrypted. We are now going to move it to the
460      * rx_pending queue, where it waits to be further processed
461      * by ch_rx().
462      */
463     ossl_list_rxe_remove(&qrx->rx_free, rxe);
464     ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
465 
466     return 1;
467 
468 malformed:
469     /* caller (port_default_packet_handler()) should discard urxe */
470     return 0;
471 }
472 
473 int ossl_qrx_validate_initial_packet(OSSL_QRX *qrx, QUIC_URXE *urxe,
474                                      const QUIC_CONN_ID *dcid)
475 {
476     urxe->processed     = 0;
477     urxe->hpr_removed   = 0;
478     urxe->deferred      = 0;
479 
480     return qrx_validate_initial_pkt(qrx, urxe, dcid, urxe->data_len);
481 }
482 
483 static void qrx_requeue_deferred(OSSL_QRX *qrx)
484 {
485     QUIC_URXE *e;
486 
487     while ((e = ossl_list_urxe_head(&qrx->urx_deferred)) != NULL) {
488         ossl_list_urxe_remove(&qrx->urx_deferred, e);
489         ossl_list_urxe_insert_tail(&qrx->urx_pending, e);
490     }
491 }
492 
493 int ossl_qrx_provide_secret(OSSL_QRX *qrx, uint32_t enc_level,
494                             uint32_t suite_id, EVP_MD *md,
495                             const unsigned char *secret, size_t secret_len)
496 {
497     if (enc_level >= QUIC_ENC_LEVEL_NUM)
498         return 0;
499 
500     if (!ossl_qrl_enc_level_set_provide_secret(&qrx->el_set,
501                                                qrx->libctx,
502                                                qrx->propq,
503                                                enc_level,
504                                                suite_id,
505                                                md,
506                                                secret,
507                                                secret_len,
508                                                qrx->init_key_phase_bit,
509                                                /*is_tx=*/0))
510         return 0;
511 
512     /*
513      * Any packets we previously could not decrypt, we may now be able to
514      * decrypt, so move any datagrams containing deferred packets from the
515      * deferred to the pending queue.
516      */
517     qrx_requeue_deferred(qrx);
518     return 1;
519 }
520 
521 int ossl_qrx_discard_enc_level(OSSL_QRX *qrx, uint32_t enc_level)
522 {
523     if (enc_level >= QUIC_ENC_LEVEL_NUM)
524         return 0;
525 
526     ossl_qrl_enc_level_set_discard(&qrx->el_set, enc_level);
527     return 1;
528 }
529 
530 /* Returns 1 if there are one or more pending RXEs. */
531 int ossl_qrx_processed_read_pending(OSSL_QRX *qrx)
532 {
533     return !ossl_list_rxe_is_empty(&qrx->rx_pending);
534 }
535 
536 /* Returns 1 if there are yet-unprocessed packets. */
537 int ossl_qrx_unprocessed_read_pending(OSSL_QRX *qrx)
538 {
539     return !ossl_list_urxe_is_empty(&qrx->urx_pending)
540            || !ossl_list_urxe_is_empty(&qrx->urx_deferred);
541 }
542 
543 /* Pop the next pending RXE. Returns NULL if no RXE is pending. */
544 static RXE *qrx_pop_pending_rxe(OSSL_QRX *qrx)
545 {
546     RXE *rxe = ossl_list_rxe_head(&qrx->rx_pending);
547 
548     if (rxe == NULL)
549         return NULL;
550 
551     ossl_list_rxe_remove(&qrx->rx_pending, rxe);
552     return rxe;
553 }
554 
555 /* Allocate a new RXE. */
556 static RXE *qrx_alloc_rxe(size_t alloc_len)
557 {
558     RXE *rxe;
559 
560     if (alloc_len >= SIZE_MAX - sizeof(RXE))
561         return NULL;
562 
563     rxe = OPENSSL_malloc(sizeof(RXE) + alloc_len);
564     if (rxe == NULL)
565         return NULL;
566 
567     ossl_list_rxe_init_elem(rxe);
568     rxe->alloc_len = alloc_len;
569     rxe->data_len  = 0;
570     rxe->refcount  = 0;
571     return rxe;
572 }
573 
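/*
 * Note that the RXE header and its payload buffer are a single allocation:
 * alloc_len bytes of payload space follow the structure and are reached via
 * rxe_data().
 */
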
574 /*
575  * Ensures there is at least one RXE in the RX free list, allocating a new entry
576  * if necessary. The returned RXE is in the RX free list; it is not popped.
577  *
578  * alloc_len is a hint which may be used to determine the RXE size if allocation
579  * is necessary. Returns NULL on allocation failure.
580  */
581 static RXE *qrx_ensure_free_rxe(OSSL_QRX *qrx, size_t alloc_len)
582 {
583     RXE *rxe;
584 
585     if (ossl_list_rxe_head(&qrx->rx_free) != NULL)
586         return ossl_list_rxe_head(&qrx->rx_free);
587 
588     rxe = qrx_alloc_rxe(alloc_len);
589     if (rxe == NULL)
590         return NULL;
591 
592     ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
593     return rxe;
594 }
595 
596 /*
597  * Resize the data buffer attached to an RXE to be n bytes in size. The address
598  * of the RXE might change; the new address is returned, or NULL on failure, in
599  * which case the original RXE remains valid.
600  */
601 static RXE *qrx_resize_rxe(RXE_LIST *rxl, RXE *rxe, size_t n)
602 {
603     RXE *rxe2, *p;
604 
605     /* Should never happen. */
606     if (rxe == NULL)
607         return NULL;
608 
609     if (n >= SIZE_MAX - sizeof(RXE))
610         return NULL;
611 
612     /* Remove the item from the list to avoid accessing freed memory */
613     p = ossl_list_rxe_prev(rxe);
614     ossl_list_rxe_remove(rxl, rxe);
615 
616     /* Should never resize an RXE which has been handed out. */
617     if (!ossl_assert(rxe->refcount == 0))
618         return NULL;
619 
620     /*
621      * NOTE: We do not clear old memory, although it does contain decrypted
622      * data.
623      */
624     rxe2 = OPENSSL_realloc(rxe, sizeof(RXE) + n);
625     if (rxe2 == NULL) {
626         /* Resize failed, restore old allocation. */
627         if (p == NULL)
628             ossl_list_rxe_insert_head(rxl, rxe);
629         else
630             ossl_list_rxe_insert_after(rxl, p, rxe);
631         return NULL;
632     }
633 
634     if (p == NULL)
635         ossl_list_rxe_insert_head(rxl, rxe2);
636     else
637         ossl_list_rxe_insert_after(rxl, p, rxe2);
638 
639     rxe2->alloc_len = n;
640     return rxe2;
641 }
642 
643 /*
644  * Ensure the data buffer attached to an RXE is at least n bytes in size.
645  * Returns NULL on failure.
646  */
647 static RXE *qrx_reserve_rxe(RXE_LIST *rxl,
648                             RXE *rxe, size_t n)
649 {
650     if (rxe->alloc_len >= n)
651         return rxe;
652 
653     return qrx_resize_rxe(rxl, rxe, n);
654 }
655 
656 /* Return a RXE handed out to the user back to our freelist. */
657 static void qrx_recycle_rxe(OSSL_QRX *qrx, RXE *rxe)
658 {
659     /* RXE should not be in any list */
660     assert(ossl_list_rxe_prev(rxe) == NULL && ossl_list_rxe_next(rxe) == NULL);
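    /*
     * Reset the user-facing packet pointers before the RXE goes back on the
     * free list; they refer to per-packet state which will be reused.
     */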
661     rxe->pkt.hdr    = NULL;
662     rxe->pkt.peer   = NULL;
663     rxe->pkt.local  = NULL;
664     ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
665 }
666 
667 /*
668  * Given a pointer to a pointer pointing to a buffer and the size of that
669  * buffer, copy the buffer into *prxe, expanding the RXE if necessary (its
670  * pointer may change due to realloc). *pi is the offset in bytes to copy the
671  * buffer to, and on success is updated to be the offset pointing after the
672  * copied buffer. *pptr is updated to point to the new location of the buffer.
673  */
674 static int qrx_relocate_buffer(OSSL_QRX *qrx, RXE **prxe, size_t *pi,
675                                const unsigned char **pptr, size_t buf_len)
676 {
677     RXE *rxe;
678     unsigned char *dst;
679 
680     if (!buf_len)
681         return 1;
682 
683     if ((rxe = qrx_reserve_rxe(&qrx->rx_free, *prxe, *pi + buf_len)) == NULL)
684         return 0;
685 
686     *prxe = rxe;
687     dst = (unsigned char *)rxe_data(rxe) + *pi;
688 
689     memcpy(dst, *pptr, buf_len);
690     *pi += buf_len;
691     *pptr = dst;
692     return 1;
693 }
694 
695 static uint32_t qrx_determine_enc_level(const QUIC_PKT_HDR *hdr)
696 {
697     switch (hdr->type) {
698         case QUIC_PKT_TYPE_INITIAL:
699             return QUIC_ENC_LEVEL_INITIAL;
700         case QUIC_PKT_TYPE_HANDSHAKE:
701             return QUIC_ENC_LEVEL_HANDSHAKE;
702         case QUIC_PKT_TYPE_0RTT:
703             return QUIC_ENC_LEVEL_0RTT;
704         case QUIC_PKT_TYPE_1RTT:
705             return QUIC_ENC_LEVEL_1RTT;
706 
707         default:
708             assert(0);
709         case QUIC_PKT_TYPE_RETRY:
710         case QUIC_PKT_TYPE_VERSION_NEG:
711             return QUIC_ENC_LEVEL_INITIAL; /* not used */
712     }
713 }
714 
715 static uint32_t rxe_determine_pn_space(RXE *rxe)
716 {
717     uint32_t enc_level;
718 
719     enc_level = qrx_determine_enc_level(&rxe->hdr);
720     return ossl_quic_enc_level_to_pn_space(enc_level);
721 }
722 
723 static int qrx_validate_hdr_early(OSSL_QRX *qrx, RXE *rxe,
724                                   const QUIC_CONN_ID *first_dcid)
725 {
726     /* Ensure version is what we want. */
727     if (rxe->hdr.version != QUIC_VERSION_1
728         && rxe->hdr.version != QUIC_VERSION_NONE)
729         return 0;
730 
731     /* Clients should never receive 0-RTT packets. */
732     if (rxe->hdr.type == QUIC_PKT_TYPE_0RTT)
733         return 0;
734 
735     /* Version negotiation and retry packets must be the first packet. */
736     if (first_dcid != NULL && !ossl_quic_pkt_type_can_share_dgram(rxe->hdr.type))
737         return 0;
738 
739     /*
740      * If this is not the first packet in a datagram, the destination connection
741      * ID must match the one in that packet.
742      */
743     if (first_dcid != NULL) {
744         if (!ossl_assert(first_dcid->id_len < QUIC_MAX_CONN_ID_LEN)
745             || !ossl_quic_conn_id_eq(first_dcid,
746                                      &rxe->hdr.dst_conn_id))
747             return 0;
748     }
749 
750     return 1;
751 }
752 
753 /* Validate header and decode PN. */
754 static int qrx_validate_hdr(OSSL_QRX *qrx, RXE *rxe)
755 {
756     int pn_space = rxe_determine_pn_space(rxe);
757 
758     if (!ossl_quic_wire_decode_pkt_hdr_pn(rxe->hdr.pn, rxe->hdr.pn_len,
759                                           qrx->largest_pn[pn_space],
760                                           &rxe->pn))
761         return 0;
762 
763     return 1;
764 }
765 
766 /* Late packet header validation. */
767 static int qrx_validate_hdr_late(OSSL_QRX *qrx, RXE *rxe)
768 {
769     int pn_space = rxe_determine_pn_space(rxe);
770 
771     /*
772      * Allow our user to decide whether to discard the packet before we try and
773      * decrypt it.
774      */
775     if (qrx->validation_cb != NULL
776         && !qrx->validation_cb(rxe->pn, pn_space, qrx->validation_cb_arg))
777         return 0;
778 
779     return 1;
780 }
781 
782 /*
783  * Retrieves the correct cipher context for an EL and key phase. Writes the key
784  * epoch number actually used for packet decryption to *rx_key_epoch.
785  */
786 static size_t qrx_get_cipher_ctx_idx(OSSL_QRX *qrx, OSSL_QRL_ENC_LEVEL *el,
787                                      uint32_t enc_level,
788                                      unsigned char key_phase_bit,
789                                      uint64_t *rx_key_epoch,
790                                      int *is_old_key)
791 {
792     size_t idx;
793 
794     *is_old_key = 0;
795 
796     if (enc_level != QUIC_ENC_LEVEL_1RTT) {
797         *rx_key_epoch = 0;
798         return 0;
799     }
800 
801     if (!ossl_assert(key_phase_bit <= 1))
802         return SIZE_MAX;
803 
804     /*
805      * RFC 9001 requires that we not create timing channels which could reveal
806      * the decrypted value of the Key Phase bit. We usually handle this by
807      * keeping the cipher contexts for both the current and next key epochs
808      * around, so that we just select a cipher context blindly using the key
809      * phase bit, which is time-invariant.
810      *
811      * In the COOLDOWN state, we only have one keyslot/cipher context. RFC 9001
812      * suggests an implementation strategy to avoid creating a timing channel in
813      * this case:
814      *
815      *   Endpoints can use randomized packet protection keys in place of
816      *   discarded keys when key updates are not yet permitted.
817      *
818      * Rather than use a randomised key, we simply use our existing key as it
819      * will fail AEAD verification anyway. This avoids the need to keep around a
820      * dedicated garbage key.
821      *
822      * Note: Accessing different cipher contexts is technically not
823      * timing-channel safe due to microarchitectural side channels, but this is
824      * the best we can reasonably do and appears to be directly suggested by the
825      * RFC.
826      */
827     idx = (el->state == QRL_EL_STATE_PROV_COOLDOWN ? el->key_epoch & 1
828                                                    : key_phase_bit);
829 
830     /*
831      * We also need to determine the key epoch number which this index
832      * corresponds to. This is so we can report the key epoch number in the
833      * OSSL_QRX_PKT structure, which callers need to validate whether it was OK
834      * for a packet to be sent using a given key epoch's keys.
835      */
836     switch (el->state) {
837     case QRL_EL_STATE_PROV_NORMAL:
838         /*
839          * If we are in the NORMAL state, usually the KP bit will match the LSB
840          * of our key epoch, meaning no new key update is being signalled. If it
841          * does not match, this means the packet (purports to) belong to
842          * the next key epoch.
843          *
844          * IMPORTANT: The AEAD tag has not been verified yet when this function
845          * is called, so this code must be timing-channel safe, hence use of
846          * XOR. Moreover, the value output below is not yet authenticated.
847          */
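        /*
         * Worked example (for illustration): with key_epoch == 4 (LSB 0) and
         * a received Key Phase bit of 1, this yields rx_key_epoch == 5, i.e.
         * the packet purports to use the next epoch's keys.
         */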
848         *rx_key_epoch
849             = el->key_epoch + ((el->key_epoch & 1) ^ (uint64_t)key_phase_bit);
850         break;
851 
852     case QRL_EL_STATE_PROV_UPDATING:
853         /*
854          * If we are in the UPDATING state, usually the KP bit will match the
855          * LSB of our key epoch. If it does not match, this means that the
856          * packet (purports to) belong to the previous key epoch.
857          *
858          * As above, must be timing-channel safe.
859          */
860         *is_old_key = (el->key_epoch & 1) ^ (uint64_t)key_phase_bit;
861         *rx_key_epoch = el->key_epoch - (uint64_t)*is_old_key;
862         break;
863 
864     case QRL_EL_STATE_PROV_COOLDOWN:
865         /*
866          * If we are in COOLDOWN, there is only one key epoch we can possibly
867          * decrypt with, so just try that. If AEAD decryption fails, the
868          * value we output here isn't used anyway.
869          */
870         *rx_key_epoch = el->key_epoch;
871         break;
872     }
873 
874     return idx;
875 }
876 
877 /*
878  * Tries to decrypt a packet payload.
879  *
880  * Returns 1 on success or 0 on failure (which is permanent). The payload is
881  * decrypted from src and written to dst. The buffer dst must be of at least
882  * src_len bytes in length. The actual length of the output in bytes is written
883  * to *dec_len on success, which will always be equal to or less than (usually
884  * less than) src_len.
885  */
886 static int qrx_decrypt_pkt_body(OSSL_QRX *qrx, unsigned char *dst,
887                                 const unsigned char *src,
888                                 size_t src_len, size_t *dec_len,
889                                 const unsigned char *aad, size_t aad_len,
890                                 QUIC_PN pn, uint32_t enc_level,
891                                 unsigned char key_phase_bit,
892                                 uint64_t *rx_key_epoch)
893 {
894     int l = 0, l2 = 0, is_old_key, nonce_len;
895     unsigned char nonce[EVP_MAX_IV_LENGTH];
896     size_t i, cctx_idx;
897     OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
898                                                         enc_level, 1);
899     EVP_CIPHER_CTX *cctx;
900 
901     if (src_len > INT_MAX || aad_len > INT_MAX)
902         return 0;
903 
904     /* We should not have been called if we do not have key material. */
905     if (!ossl_assert(el != NULL))
906         return 0;
907 
908     if (el->tag_len >= src_len)
909         return 0;
910 
911     /*
912      * If we have failed to authenticate a certain number of ciphertexts, refuse
913      * to decrypt any more ciphertexts.
914      */
915     if (qrx->forged_pkt_count >= ossl_qrl_get_suite_max_forged_pkt(el->suite_id))
916         return 0;
917 
918     cctx_idx = qrx_get_cipher_ctx_idx(qrx, el, enc_level, key_phase_bit,
919                                       rx_key_epoch, &is_old_key);
920     if (!ossl_assert(cctx_idx < OSSL_NELEM(el->cctx)))
921         return 0;
922 
923     if (is_old_key && pn >= qrx->cur_epoch_start_pn)
924         /*
925          * RFC 9001 s. 5.5: Once an endpoint successfully receives a packet with
926          * a given PN, it MUST discard all packets in the same PN space with
927          * higher PNs if they cannot be successfully unprotected with the same
928          * key, or -- if there is a key update -- a subsequent packet protection
929          * key.
930          *
931          * In other words, once a PN x triggers a KU, it is invalid for us to
932          * receive a packet with a newer PN y (y > x) using the old keys.
933          */
934         return 0;
935 
936     cctx = el->cctx[cctx_idx];
937 
938     /* Construct nonce (nonce=IV ^ PN). */
939     nonce_len = EVP_CIPHER_CTX_get_iv_length(cctx);
940     if (!ossl_assert(nonce_len >= (int)sizeof(QUIC_PN)))
941         return 0;
942 
943     memcpy(nonce, el->iv[cctx_idx], nonce_len);
944     for (i = 0; i < sizeof(QUIC_PN); ++i)
945         nonce[nonce_len - i - 1] ^= (unsigned char)(pn >> (i * 8));
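    /*
     * For example, with a 12-byte IV the 8-byte big-endian packet number is
     * XORed into nonce[4..11] (the least significant IV bytes), per the
     * RFC 9001 nonce construction.
     */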
946 
947     /* type and key will already have been set up; feed the IV. */
948     if (EVP_CipherInit_ex(cctx, NULL,
949                           NULL, NULL, nonce, /*enc=*/0) != 1)
950         return 0;
951 
952     /* Feed the AEAD tag we got so the cipher can validate it. */
953     if (EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_SET_TAG,
954                             el->tag_len,
955                             (unsigned char *)src + src_len - el->tag_len) != 1)
956         return 0;
957 
958     /* Feed AAD data. */
959     if (EVP_CipherUpdate(cctx, NULL, &l, aad, aad_len) != 1)
960         return 0;
961 
962     /* Feed encrypted packet body. */
963     if (EVP_CipherUpdate(cctx, dst, &l, src, src_len - el->tag_len) != 1)
964         return 0;
965 
966 #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
967     /*
968      * Throw away what we just decrypted and just use the ciphertext instead
969      * (which should be unencrypted)
970      */
971     memcpy(dst, src, l);
972 
973     /* Pretend to authenticate the tag but ignore it */
974     if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
975         /* We don't care */
976     }
977 #else
978     /* Ensure authentication succeeded. */
979     if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
980         /* Authentication failed, increment failed auth counter. */
981         ++qrx->forged_pkt_count;
982         return 0;
983     }
984 #endif
985 
986     *dec_len = l;
987     return 1;
988 }
989 
990 static ossl_inline void ignore_res(int x)
991 {
992     /* No-op. */
993 }
994 
995 static void qrx_key_update_initiated(OSSL_QRX *qrx, QUIC_PN pn)
996 {
997     if (!ossl_qrl_enc_level_set_key_update(&qrx->el_set, QUIC_ENC_LEVEL_1RTT))
998         /* We are already in RXKU, so we don't call the callback again. */
999         return;
1000 
1001     qrx->cur_epoch_start_pn = pn;
1002 
1003     if (qrx->key_update_cb != NULL)
1004         qrx->key_update_cb(pn, qrx->key_update_cb_arg);
1005 }
1006 
1007 /* Process a single packet in a datagram. */
1008 static int qrx_process_pkt(OSSL_QRX *qrx, QUIC_URXE *urxe,
1009                            PACKET *pkt, size_t pkt_idx,
1010                            QUIC_CONN_ID *first_dcid,
1011                            size_t datagram_len)
1012 {
1013     RXE *rxe;
1014     const unsigned char *eop = NULL;
1015     size_t i, aad_len = 0, dec_len = 0;
1016     PACKET orig_pkt = *pkt;
1017     const unsigned char *sop = PACKET_data(pkt);
1018     unsigned char *dst;
1019     char need_second_decode = 0, already_processed = 0;
1020     QUIC_PKT_HDR_PTRS ptrs;
1021     uint32_t pn_space, enc_level;
1022     OSSL_QRL_ENC_LEVEL *el = NULL;
1023     uint64_t rx_key_epoch = UINT64_MAX;
1024 
1025     /*
1026      * Get a free RXE. If we need to allocate a new one, use the packet length
1027      * as a good ballpark figure.
1028      */
1029     rxe = qrx_ensure_free_rxe(qrx, PACKET_remaining(pkt));
1030     if (rxe == NULL)
1031         return 0;
1032 
1033     /* Have we already processed this packet? */
1034     if (pkt_is_marked(&urxe->processed, pkt_idx))
1035         already_processed = 1;
1036 
1037     /*
1038      * Decode the header into the RXE structure. We first decrypt and read the
1039      * unprotected part of the packet header (unless we already removed header
1040      * protection, in which case we decode all of it).
1041      */
1042     need_second_decode = !pkt_is_marked(&urxe->hpr_removed, pkt_idx);
1043     if (!ossl_quic_wire_decode_pkt_hdr(pkt,
1044                                        qrx->short_conn_id_len,
1045                                        need_second_decode, 0, &rxe->hdr, &ptrs,
1046                                        NULL))
1047         goto malformed;
1048 
1049     /*
1050      * Our successful decode above included an intelligible length and the
1051      * PACKET is now pointing to the end of the QUIC packet.
1052      */
1053     eop = PACKET_data(pkt);
1054 
1055     /*
1056      * Make a note of the first packet's DCID so we can later ensure the
1057      * destination connection IDs of all packets in a datagram match.
1058      */
1059     if (pkt_idx == 0)
1060         *first_dcid = rxe->hdr.dst_conn_id;
1061 
1062     /*
1063      * Early header validation. Since we now know the packet length, we can also
1064      * now skip over it if we already processed it.
1065      */
1066     if (already_processed
1067         || !qrx_validate_hdr_early(qrx, rxe, pkt_idx == 0 ? NULL : first_dcid))
1068         /*
1069          * Already processed packets are handled identically to malformed
1070          * packets; i.e., they are ignored.
1071          */
1072         goto malformed;
1073 
1074     if (!ossl_quic_pkt_type_is_encrypted(rxe->hdr.type)) {
1075         /*
1076          * Version negotiation and retry packets are a special case. They do not
1077          * contain a payload which needs decrypting and have no header
1078          * protection.
1079          */
1080 
1081         /* Just copy the payload from the URXE to the RXE. */
1082         if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len)) == NULL)
1083             /*
1084              * Allocation failure. EOP will be pointing to the end of the
1085              * datagram so processing of this datagram will end here.
1086              */
1087             goto malformed;
1088 
1089         /* We are now committed to returning the packet. */
1090         memcpy(rxe_data(rxe), rxe->hdr.data, rxe->hdr.len);
1091         pkt_mark(&urxe->processed, pkt_idx);
1092 
1093         rxe->hdr.data   = rxe_data(rxe);
1094         rxe->pn         = QUIC_PN_INVALID;
1095 
1096         rxe->data_len       = rxe->hdr.len;
1097         rxe->datagram_len   = datagram_len;
1098         rxe->key_epoch      = 0;
1099         rxe->peer           = urxe->peer;
1100         rxe->local          = urxe->local;
1101         rxe->time           = urxe->time;
1102         rxe->datagram_id    = urxe->datagram_id;
1103 
1104         /* Move RXE to pending. */
1105         ossl_list_rxe_remove(&qrx->rx_free, rxe);
1106         ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
1107         return 0; /* success, did not defer */
1108     }
1109 
1110     /* Determine encryption level of packet. */
1111     enc_level = qrx_determine_enc_level(&rxe->hdr);
1112 
1113     /* If we do not have keying material for this encryption level yet, defer. */
1114     switch (ossl_qrl_enc_level_set_have_el(&qrx->el_set, enc_level)) {
1115         case 1:
1116             /* We have keys. */
1117             if (enc_level == QUIC_ENC_LEVEL_1RTT && !qrx->allow_1rtt)
1118                 /*
1119                  * But we cannot process 1-RTT packets until the handshake is
1120                  * completed (RFC 9000 s. 5.7).
1121                  */
1122                 goto cannot_decrypt;
1123 
1124             break;
1125         case 0:
1126             /* No keys yet. */
1127             goto cannot_decrypt;
1128         default:
1129         /* We already discarded keys for this EL; we will never process this. */
1130             goto malformed;
1131     }
1132 
1133     /*
1134      * We will copy any token included in the packet to the start of our RXE
1135      * data buffer (so that we don't reference the URXE buffer any more and can
1136      * recycle it). Track our position in the RXE buffer by index instead of
1137      * pointer as the pointer may change as reallocs occur.
1138      */
1139     i = 0;
1140 
1141     /*
1142      * rxe->hdr.data is now pointing at the (encrypted) packet payload. rxe->hdr
1143      * also has fields pointing into the PACKET buffer which will be going away
1144      * soon (the URXE will be reused for another incoming packet).
1145      *
1146      * Firstly, relocate some of these fields into the RXE as needed.
1147      *
1148      * Relocate token buffer and fix pointer.
1149      */
1150     if (rxe->hdr.type == QUIC_PKT_TYPE_INITIAL) {
1151         const unsigned char *token = rxe->hdr.token;
1152 
1153         /*
1154          * This may change the value of rxe and change the value of the token
1155          * pointer as well. So we must make a temporary copy of the pointer to
1156          * the token, and then copy it back into the new location of the RXE.
1157          */
1158         if (!qrx_relocate_buffer(qrx, &rxe, &i, &token, rxe->hdr.token_len))
1159             goto malformed;
1160 
1161         rxe->hdr.token = token;
1162     }
1163 
1164     /* Now remove header protection. */
1165     *pkt = orig_pkt;
1166 
1167     el = ossl_qrl_enc_level_set_get(&qrx->el_set, enc_level, 1);
1168     assert(el != NULL); /* Already checked above */
1169 
1170     if (need_second_decode) {
1171         if (!ossl_quic_hdr_protector_decrypt(&el->hpr, &ptrs))
1172             goto malformed;
1173 
1174         /*
1175          * We have removed header protection, so don't attempt to do it again if
1176          * the packet gets deferred and processed again.
1177          */
1178         pkt_mark(&urxe->hpr_removed, pkt_idx);
1179 
1180         /* Decode the now unprotected header. */
1181         if (ossl_quic_wire_decode_pkt_hdr(pkt, qrx->short_conn_id_len,
1182                                           0, 0, &rxe->hdr, NULL, NULL) != 1)
1183             goto malformed;
1184     }
1185 
1186     /* Validate header and decode PN. */
1187     if (!qrx_validate_hdr(qrx, rxe))
1188         goto malformed;
1189 
1190     if (qrx->msg_callback != NULL)
1191         qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_PACKET, sop,
1192                           eop - sop - rxe->hdr.len, qrx->msg_callback_ssl,
1193                           qrx->msg_callback_arg);
1194 
1195     /*
1196      * The AAD data is the entire (unprotected) packet header including the PN.
1197      * The packet header has been unprotected in place, so we can just reuse the
1198      * PACKET buffer. The header ends where the payload begins.
1199      */
1200     aad_len = rxe->hdr.data - sop;
1201 
1202     /* Ensure the RXE buffer size is adequate for our payload. */
1203     if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len + i)) == NULL) {
1204         /*
1205          * Allocation failure, treat as malformed and do not bother processing
1206          * any further packets in the datagram as they are likely to also
1207          * encounter allocation failures.
1208          */
1209         eop = NULL;
1210         goto malformed;
1211     }
1212 
1213     /*
1214      * We decrypt the packet body to immediately after the token at the start of
1215      * the RXE buffer (where present).
1216      *
1217      * Do the decryption from the PACKET (which points into URXE memory) to our
1218      * RXE payload (single-copy decryption), then fixup the pointers in the
1219      * header to point to our new buffer.
1220      *
1221      * If decryption fails this is considered a permanent error; we defer
1222      * packets we don't yet have decryption keys for above, so if this fails,
1223      * something has gone wrong with the handshake process or a packet has been
1224      * corrupted.
1225      */
1226     dst = (unsigned char *)rxe_data(rxe) + i;
1227     if (!qrx_decrypt_pkt_body(qrx, dst, rxe->hdr.data, rxe->hdr.len,
1228                               &dec_len, sop, aad_len, rxe->pn, enc_level,
1229                               rxe->hdr.key_phase, &rx_key_epoch))
1230         goto malformed;
1231 
1232     /*
1233      * -----------------------------------------------------
1234      *   IMPORTANT: ANYTHING ABOVE THIS LINE IS UNVERIFIED
1235      *              AND MUST BE TIMING-CHANNEL SAFE.
1236      * -----------------------------------------------------
1237      *
1238      * At this point, we have successfully authenticated the AEAD tag and no
1239      * longer need to worry about exposing the PN, PN length or Key Phase bit in
1240      * timing channels. Invoke any configured validation callback to allow for
1241      * rejection of duplicate PNs.
1242      */
1243     if (!qrx_validate_hdr_late(qrx, rxe))
1244         goto malformed;
1245 
1246     /* Check for a Key Phase bit differing from our expectation. */
1247     if (rxe->hdr.type == QUIC_PKT_TYPE_1RTT
1248         && rxe->hdr.key_phase != (el->key_epoch & 1))
1249         qrx_key_update_initiated(qrx, rxe->pn);
1250 
1251     /*
1252      * We have now successfully decrypted the packet payload. If there are
1253      * additional packets in the datagram, it is possible we will fail to
1254      * decrypt them and need to defer them until we have some key material we
1255      * don't currently possess. If this happens, the URXE will be moved to the
1256      * deferred queue. Since a URXE corresponds to one datagram, which may
1257      * contain multiple packets, we must ensure any packets we have already
1258      * processed in the URXE are not processed again (this is an RFC
1259      * requirement). We do this by marking the nth packet in the datagram as
1260      * processed.
1261      *
1262      * We are now committed to returning this decrypted packet to the user,
1263      * meaning we now consider the packet processed and must mark it
1264      * accordingly.
1265      */
1266     pkt_mark(&urxe->processed, pkt_idx);
1267 
1268     /*
1269      * Update header to point to the decrypted buffer, which may be shorter
1270      * due to AEAD tags, block padding, etc.
1271      */
1272     rxe->hdr.data       = dst;
1273     rxe->hdr.len        = dec_len;
1274     rxe->data_len       = dec_len;
1275     rxe->datagram_len   = datagram_len;
1276     rxe->key_epoch      = rx_key_epoch;
1277 
1278     /* We processed the PN successfully, so update largest processed PN. */
1279     pn_space = rxe_determine_pn_space(rxe);
1280     if (rxe->pn > qrx->largest_pn[pn_space])
1281         qrx->largest_pn[pn_space] = rxe->pn;
1282 
1283     /* Copy across network addresses and RX time from URXE to RXE. */
1284     rxe->peer           = urxe->peer;
1285     rxe->local          = urxe->local;
1286     rxe->time           = urxe->time;
1287     rxe->datagram_id    = urxe->datagram_id;
1288 
1289     /* Move RXE to pending. */
1290     ossl_list_rxe_remove(&qrx->rx_free, rxe);
1291     ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
1292     return 0; /* success, did not defer; not distinguished from failure */
1293 
1294 cannot_decrypt:
1295     /*
1296      * We cannot process this packet right now (but might be able to later). We
1297      * MUST attempt to process any other packets in the datagram, so defer it
1298      * and skip over it.
1299      */
1300     assert(eop != NULL && eop >= PACKET_data(pkt));
1301     /*
1302      * We don't care if this fails as it will just result in the packet being at
1303      * the end of the datagram buffer.
1304      */
1305     ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
1306     return 1; /* deferred */
1307 
1308 malformed:
1309     if (eop != NULL) {
1310         /*
1311          * This packet cannot be processed and will never be processable. We
1312          * were at least able to decode its header and determine its length, so
1313          * we can skip over it and try to process any subsequent packets in the
1314          * datagram.
1315          *
1316          * Mark as processed as an optimization.
1317          */
1318         assert(eop >= PACKET_data(pkt));
1319         pkt_mark(&urxe->processed, pkt_idx);
1320         /* We don't care if this fails (see above) */
1321         ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
1322     } else {
1323         /*
1324          * This packet cannot be processed and will never be processable.
1325          * Because even its header is not intelligible, we cannot examine any
1326          * further packets in the datagram because its length cannot be
1327          * discerned.
1328          *
1329          * Advance over the entire remainder of the datagram, and mark it as
1330          * processed as an optimization.
1331          */
1332         pkt_mark(&urxe->processed, pkt_idx);
1333         /* We don't care if this fails (see above) */
1334         ignore_res(PACKET_forward(pkt, PACKET_remaining(pkt)));
1335     }
1336     return 0; /* failure, did not defer; not distinguished from success */
1337 }
1338 
1339 /* Process a datagram which was received. */
1340 static int qrx_process_datagram(OSSL_QRX *qrx, QUIC_URXE *e,
1341                                 const unsigned char *data,
1342                                 size_t data_len)
1343 {
1344     int have_deferred = 0;
1345     PACKET pkt;
1346     size_t pkt_idx = 0;
1347     QUIC_CONN_ID first_dcid = { 255 };
1348 
1349     qrx->bytes_received += data_len;
1350 
1351     if (!PACKET_buf_init(&pkt, data, data_len))
1352         return 0;
1353 
1354     for (; PACKET_remaining(&pkt) > 0; ++pkt_idx) {
1355         /*
1356          * A packet smaller than the minimum possible QUIC packet size is not
1357          * considered valid. We also ignore more than a certain number of
1358          * packets within the same datagram.
1359          */
1360         if (PACKET_remaining(&pkt) < QUIC_MIN_VALID_PKT_LEN
1361             || pkt_idx >= QUIC_MAX_PKT_PER_URXE)
1362             break;
1363 
1364         /*
1365          * We note whether packet processing resulted in a deferral since
1366          * this means we need to move the URXE to the deferred list rather
1367          * than the free list after we're finished dealing with it for now.
1368          *
1369          * However, we don't otherwise care here whether processing succeeded or
1370          * failed, as the RFC says even if a packet in a datagram is malformed,
1371          * we should still try to process any packets following it.
1372          *
1373          * In the case where the packet is so malformed we can't determine its
1374          * length, qrx_process_pkt will take care of advancing to the end of
1375          * the packet, so we will exit the loop automatically in this case.
1376          */
1377         if (qrx_process_pkt(qrx, e, &pkt, pkt_idx, &first_dcid, data_len))
1378             have_deferred = 1;
1379     }
1380 
1381     /* Only report whether there were any deferrals. */
1382     return have_deferred;
1383 }
1384 
1385 /* Process a single pending URXE. */
1386 static int qrx_process_one_urxe(OSSL_QRX *qrx, QUIC_URXE *e)
1387 {
1388     int was_deferred;
1389 
1390     /* The next URXE we process should be at the head of the pending list. */
1391     if (!ossl_assert(e == ossl_list_urxe_head(&qrx->urx_pending)))
1392         return 0;
1393 
1394     /*
1395      * Attempt to process the datagram. The return value indicates only if
1396      * processing of the datagram was deferred. If we failed to process the
1397      * datagram, we do not attempt to process it again and silently eat the
1398      * error.
1399      */
1400     was_deferred = qrx_process_datagram(qrx, e, ossl_quic_urxe_data(e),
1401                                         e->data_len);
1402 
1403     /*
1404      * Remove the URXE from the pending list and return it to
1405      * either the free or deferred list.
1406      */
1407     ossl_list_urxe_remove(&qrx->urx_pending, e);
1408     if (was_deferred > 0 &&
1409             (e->deferred || qrx->num_deferred < qrx->max_deferred)) {
1410         ossl_list_urxe_insert_tail(&qrx->urx_deferred, e);
1411         if (!e->deferred) {
1412             e->deferred = 1;
1413             ++qrx->num_deferred;
1414         }
1415     } else {
1416         if (e->deferred) {
1417             e->deferred = 0;
1418             --qrx->num_deferred;
1419         }
1420         ossl_quic_demux_release_urxe(qrx->demux, e);
1421     }
1422 
1423     return 1;
1424 }
1425 
1426 /* Process any pending URXEs to generate pending RXEs. */
1427 static int qrx_process_pending_urxl(OSSL_QRX *qrx)
1428 {
1429     QUIC_URXE *e;
1430 
1431     while ((e = ossl_list_urxe_head(&qrx->urx_pending)) != NULL)
1432         if (!qrx_process_one_urxe(qrx, e))
1433             return 0;
1434 
1435     return 1;
1436 }
1437 
1438 int ossl_qrx_read_pkt(OSSL_QRX *qrx, OSSL_QRX_PKT **ppkt)
1439 {
1440     RXE *rxe;
1441 
1442     if (!ossl_qrx_processed_read_pending(qrx)) {
1443         if (!qrx_process_pending_urxl(qrx))
1444             return 0;
1445 
1446         if (!ossl_qrx_processed_read_pending(qrx))
1447             return 0;
1448     }
1449 
1450     rxe = qrx_pop_pending_rxe(qrx);
1451     if (!ossl_assert(rxe != NULL))
1452         return 0;
1453 
1454     assert(rxe->refcount == 0);
1455     rxe->refcount = 1;
1456 
1457     rxe->pkt.hdr            = &rxe->hdr;
1458     rxe->pkt.pn             = rxe->pn;
1459     rxe->pkt.time           = rxe->time;
1460     rxe->pkt.datagram_len   = rxe->datagram_len;
1461     rxe->pkt.peer
1462         = BIO_ADDR_family(&rxe->peer) != AF_UNSPEC ? &rxe->peer : NULL;
1463     rxe->pkt.local
1464         = BIO_ADDR_family(&rxe->local) != AF_UNSPEC ? &rxe->local : NULL;
1465     rxe->pkt.key_epoch      = rxe->key_epoch;
1466     rxe->pkt.datagram_id    = rxe->datagram_id;
1467     rxe->pkt.qrx            = qrx;
1468     *ppkt = &rxe->pkt;
1469 
1470     return 1;
1471 }
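/*
 * Typical consumption pattern (sketch):
 *
 *   OSSL_QRX_PKT *pkt;
 *
 *   while (ossl_qrx_read_pkt(qrx, &pkt)) {
 *       ... examine pkt->hdr, pkt->pn, pkt->key_epoch ...
 *       ossl_qrx_pkt_release(pkt);
 *   }
 */
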
1472 
1473 void ossl_qrx_pkt_release(OSSL_QRX_PKT *pkt)
1474 {
1475     RXE *rxe;
1476 
1477     if (pkt == NULL)
1478         return;
1479 
1480     rxe = (RXE *)pkt;
1481     assert(rxe->refcount > 0);
1482     if (--rxe->refcount == 0)
1483         qrx_recycle_rxe(pkt->qrx, rxe);
1484 }
1485 
1486 void ossl_qrx_pkt_orphan(OSSL_QRX_PKT *pkt)
1487 {
1488     RXE *rxe;
1489 
1490     if (pkt == NULL)
1491         return;
1492     rxe = (RXE *)pkt;
1493     assert(rxe->refcount > 0);
1494     rxe->refcount--;
1495     assert(ossl_list_rxe_prev(rxe) == NULL && ossl_list_rxe_next(rxe) == NULL);
1496     return;
1497 }
1498 
1499 void ossl_qrx_pkt_up_ref(OSSL_QRX_PKT *pkt)
1500 {
1501     RXE *rxe = (RXE *)pkt;
1502 
1503     assert(rxe->refcount > 0);
1504     ++rxe->refcount;
1505 }
1506 
1507 uint64_t ossl_qrx_get_bytes_received(OSSL_QRX *qrx, int clear)
1508 {
1509     uint64_t v = qrx->bytes_received;
1510 
1511     if (clear)
1512         qrx->bytes_received = 0;
1513 
1514     return v;
1515 }
1516 
1517 int ossl_qrx_set_late_validation_cb(OSSL_QRX *qrx,
1518                                     ossl_qrx_late_validation_cb *cb,
1519                                     void *cb_arg)
1520 {
1521     qrx->validation_cb       = cb;
1522     qrx->validation_cb_arg   = cb_arg;
1523     return 1;
1524 }
1525 
1526 int ossl_qrx_set_key_update_cb(OSSL_QRX *qrx,
1527                                ossl_qrx_key_update_cb *cb,
1528                                void *cb_arg)
1529 {
1530     qrx->key_update_cb      = cb;
1531     qrx->key_update_cb_arg  = cb_arg;
1532     return 1;
1533 }
1534 
1535 uint64_t ossl_qrx_get_key_epoch(OSSL_QRX *qrx)
1536 {
1537     OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
1538                                                         QUIC_ENC_LEVEL_1RTT, 1);
1539 
1540     return el == NULL ? UINT64_MAX : el->key_epoch;
1541 }
1542 
1543 int ossl_qrx_key_update_timeout(OSSL_QRX *qrx, int normal)
1544 {
1545     OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
1546                                                         QUIC_ENC_LEVEL_1RTT, 1);
1547 
1548     if (el == NULL)
1549         return 0;
1550 
1551     if (el->state == QRL_EL_STATE_PROV_UPDATING
1552         && !ossl_qrl_enc_level_set_key_update_done(&qrx->el_set,
1553                                                    QUIC_ENC_LEVEL_1RTT))
1554         return 0;
1555 
1556     if (normal && el->state == QRL_EL_STATE_PROV_COOLDOWN
1557         && !ossl_qrl_enc_level_set_key_cooldown_done(&qrx->el_set,
1558                                                      QUIC_ENC_LEVEL_1RTT))
1559         return 0;
1560 
1561     return 1;
1562 }
1563 
1564 uint64_t ossl_qrx_get_cur_forged_pkt_count(OSSL_QRX *qrx)
1565 {
1566     return qrx->forged_pkt_count;
1567 }
1568 
1569 uint64_t ossl_qrx_get_max_forged_pkt_count(OSSL_QRX *qrx,
1570                                            uint32_t enc_level)
1571 {
1572     OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
1573                                                         enc_level, 1);
1574 
1575     return el == NULL ? UINT64_MAX
1576         : ossl_qrl_get_suite_max_forged_pkt(el->suite_id);
1577 }
1578 
1579 void ossl_qrx_allow_1rtt_processing(OSSL_QRX *qrx)
1580 {
1581     if (qrx->allow_1rtt)
1582         return;
1583 
1584     qrx->allow_1rtt = 1;
1585     qrx_requeue_deferred(qrx);
1586 }
1587 
1588 void ossl_qrx_set_msg_callback(OSSL_QRX *qrx, ossl_msg_cb msg_callback,
1589                                SSL *msg_callback_ssl)
1590 {
1591     qrx->msg_callback = msg_callback;
1592     qrx->msg_callback_ssl = msg_callback_ssl;
1593 }
1594 
1595 void ossl_qrx_set_msg_callback_arg(OSSL_QRX *qrx, void *msg_callback_arg)
1596 {
1597     qrx->msg_callback_arg = msg_callback_arg;
1598 }
1599 
1600 size_t ossl_qrx_get_short_hdr_conn_id_len(OSSL_QRX *qrx)
1601 {
1602     return qrx->short_conn_id_len;
1603 }
1604