1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2021 Netflix Inc. 5 * Written by: John Baldwin <jhb@FreeBSD.org> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
27 */ 28 29 #include <sys/types.h> 30 #include <sys/endian.h> 31 #include <sys/event.h> 32 #include <sys/ktls.h> 33 #include <sys/socket.h> 34 #include <sys/sysctl.h> 35 #include <netinet/in.h> 36 #include <netinet/tcp.h> 37 #include <crypto/cryptodev.h> 38 #include <assert.h> 39 #include <err.h> 40 #include <fcntl.h> 41 #include <poll.h> 42 #include <stdbool.h> 43 #include <stdlib.h> 44 #include <atf-c.h> 45 46 #include <openssl/err.h> 47 #include <openssl/evp.h> 48 #include <openssl/hmac.h> 49 50 static void 51 require_ktls(void) 52 { 53 size_t len; 54 bool enable; 55 56 len = sizeof(enable); 57 if (sysctlbyname("kern.ipc.tls.enable", &enable, &len, NULL, 0) == -1) { 58 if (errno == ENOENT) 59 atf_tc_skip("kernel does not support TLS offload"); 60 atf_libc_error(errno, "Failed to read kern.ipc.tls.enable"); 61 } 62 63 if (!enable) 64 atf_tc_skip("Kernel TLS is disabled"); 65 } 66 67 #define ATF_REQUIRE_KTLS() require_ktls() 68 69 static char 70 rdigit(void) 71 { 72 /* ASCII printable values between 0x20 and 0x7e */ 73 return (0x20 + random() % (0x7f - 0x20)); 74 } 75 76 static char * 77 alloc_buffer(size_t len) 78 { 79 char *buf; 80 size_t i; 81 82 if (len == 0) 83 return (NULL); 84 buf = malloc(len); 85 for (i = 0; i < len; i++) 86 buf[i] = rdigit(); 87 return (buf); 88 } 89 90 static bool 91 socketpair_tcp(int *sv) 92 { 93 struct pollfd pfd; 94 struct sockaddr_in sin; 95 socklen_t len; 96 int as, cs, ls; 97 98 ls = socket(PF_INET, SOCK_STREAM, 0); 99 if (ls == -1) { 100 warn("socket() for listen"); 101 return (false); 102 } 103 104 memset(&sin, 0, sizeof(sin)); 105 sin.sin_len = sizeof(sin); 106 sin.sin_family = AF_INET; 107 sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); 108 if (bind(ls, (struct sockaddr *)&sin, sizeof(sin)) == -1) { 109 warn("bind"); 110 close(ls); 111 return (false); 112 } 113 114 if (listen(ls, 1) == -1) { 115 warn("listen"); 116 close(ls); 117 return (false); 118 } 119 120 len = sizeof(sin); 121 if (getsockname(ls, (struct sockaddr *)&sin, 
&len) == -1) { 122 warn("getsockname"); 123 close(ls); 124 return (false); 125 } 126 127 cs = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0); 128 if (cs == -1) { 129 warn("socket() for connect"); 130 close(ls); 131 return (false); 132 } 133 134 if (connect(cs, (struct sockaddr *)&sin, sizeof(sin)) == -1) { 135 if (errno != EINPROGRESS) { 136 warn("connect"); 137 close(ls); 138 close(cs); 139 return (false); 140 } 141 } 142 143 as = accept4(ls, NULL, NULL, SOCK_NONBLOCK); 144 if (as == -1) { 145 warn("accept4"); 146 close(ls); 147 close(cs); 148 return (false); 149 } 150 151 close(ls); 152 153 pfd.fd = cs; 154 pfd.events = POLLOUT; 155 pfd.revents = 0; 156 ATF_REQUIRE(poll(&pfd, 1, INFTIM) == 1); 157 ATF_REQUIRE(pfd.revents == POLLOUT); 158 159 sv[0] = cs; 160 sv[1] = as; 161 return (true); 162 } 163 164 static void 165 fd_set_blocking(int fd) 166 { 167 int flags; 168 169 ATF_REQUIRE((flags = fcntl(fd, F_GETFL)) != -1); 170 flags &= ~O_NONBLOCK; 171 ATF_REQUIRE(fcntl(fd, F_SETFL, flags) != -1); 172 } 173 174 static bool 175 cbc_decrypt(const EVP_CIPHER *cipher, const char *key, const char *iv, 176 const char *input, char *output, size_t size) 177 { 178 EVP_CIPHER_CTX *ctx; 179 int outl, total; 180 181 ctx = EVP_CIPHER_CTX_new(); 182 if (ctx == NULL) { 183 warnx("EVP_CIPHER_CTX_new failed: %s", 184 ERR_error_string(ERR_get_error(), NULL)); 185 return (false); 186 } 187 if (EVP_CipherInit_ex(ctx, cipher, NULL, (const u_char *)key, 188 (const u_char *)iv, 0) != 1) { 189 warnx("EVP_CipherInit_ex failed: %s", 190 ERR_error_string(ERR_get_error(), NULL)); 191 EVP_CIPHER_CTX_free(ctx); 192 return (false); 193 } 194 EVP_CIPHER_CTX_set_padding(ctx, 0); 195 if (EVP_CipherUpdate(ctx, (u_char *)output, &outl, 196 (const u_char *)input, size) != 1) { 197 warnx("EVP_CipherUpdate failed: %s", 198 ERR_error_string(ERR_get_error(), NULL)); 199 EVP_CIPHER_CTX_free(ctx); 200 return (false); 201 } 202 total = outl; 203 if (EVP_CipherFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) 
{ 204 warnx("EVP_CipherFinal_ex failed: %s", 205 ERR_error_string(ERR_get_error(), NULL)); 206 EVP_CIPHER_CTX_free(ctx); 207 return (false); 208 } 209 total += outl; 210 if ((size_t)total != size) { 211 warnx("decrypt size mismatch: %zu vs %d", size, total); 212 EVP_CIPHER_CTX_free(ctx); 213 return (false); 214 } 215 EVP_CIPHER_CTX_free(ctx); 216 return (true); 217 } 218 219 static bool 220 verify_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad, 221 size_t aad_len, const void *buffer, size_t len, const void *digest) 222 { 223 HMAC_CTX *ctx; 224 unsigned char digest2[EVP_MAX_MD_SIZE]; 225 u_int digest_len; 226 227 ctx = HMAC_CTX_new(); 228 if (ctx == NULL) { 229 warnx("HMAC_CTX_new failed: %s", 230 ERR_error_string(ERR_get_error(), NULL)); 231 return (false); 232 } 233 if (HMAC_Init_ex(ctx, key, key_len, md, NULL) != 1) { 234 warnx("HMAC_Init_ex failed: %s", 235 ERR_error_string(ERR_get_error(), NULL)); 236 HMAC_CTX_free(ctx); 237 return (false); 238 } 239 if (HMAC_Update(ctx, aad, aad_len) != 1) { 240 warnx("HMAC_Update (aad) failed: %s", 241 ERR_error_string(ERR_get_error(), NULL)); 242 HMAC_CTX_free(ctx); 243 return (false); 244 } 245 if (HMAC_Update(ctx, buffer, len) != 1) { 246 warnx("HMAC_Update (payload) failed: %s", 247 ERR_error_string(ERR_get_error(), NULL)); 248 HMAC_CTX_free(ctx); 249 return (false); 250 } 251 if (HMAC_Final(ctx, digest2, &digest_len) != 1) { 252 warnx("HMAC_Final failed: %s", 253 ERR_error_string(ERR_get_error(), NULL)); 254 HMAC_CTX_free(ctx); 255 return (false); 256 } 257 HMAC_CTX_free(ctx); 258 if (memcmp(digest, digest2, digest_len) != 0) { 259 warnx("HMAC mismatch"); 260 return (false); 261 } 262 return (true); 263 } 264 265 static bool 266 aead_encrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce, 267 const void *aad, size_t aad_len, const char *input, char *output, 268 size_t size, char *tag, size_t tag_len) 269 { 270 EVP_CIPHER_CTX *ctx; 271 int outl, total; 272 273 ctx = 
EVP_CIPHER_CTX_new(); 274 if (ctx == NULL) { 275 warnx("EVP_CIPHER_CTX_new failed: %s", 276 ERR_error_string(ERR_get_error(), NULL)); 277 return (false); 278 } 279 if (EVP_EncryptInit_ex(ctx, cipher, NULL, (const u_char *)key, 280 (const u_char *)nonce) != 1) { 281 warnx("EVP_EncryptInit_ex failed: %s", 282 ERR_error_string(ERR_get_error(), NULL)); 283 EVP_CIPHER_CTX_free(ctx); 284 return (false); 285 } 286 EVP_CIPHER_CTX_set_padding(ctx, 0); 287 if (aad != NULL) { 288 if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)aad, 289 aad_len) != 1) { 290 warnx("EVP_EncryptUpdate for AAD failed: %s", 291 ERR_error_string(ERR_get_error(), NULL)); 292 EVP_CIPHER_CTX_free(ctx); 293 return (false); 294 } 295 } 296 if (EVP_EncryptUpdate(ctx, (u_char *)output, &outl, 297 (const u_char *)input, size) != 1) { 298 warnx("EVP_EncryptUpdate failed: %s", 299 ERR_error_string(ERR_get_error(), NULL)); 300 EVP_CIPHER_CTX_free(ctx); 301 return (false); 302 } 303 total = outl; 304 if (EVP_EncryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) { 305 warnx("EVP_EncryptFinal_ex failed: %s", 306 ERR_error_string(ERR_get_error(), NULL)); 307 EVP_CIPHER_CTX_free(ctx); 308 return (false); 309 } 310 total += outl; 311 if ((size_t)total != size) { 312 warnx("encrypt size mismatch: %zu vs %d", size, total); 313 EVP_CIPHER_CTX_free(ctx); 314 return (false); 315 } 316 if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tag_len, tag) != 317 1) { 318 warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed: %s", 319 ERR_error_string(ERR_get_error(), NULL)); 320 EVP_CIPHER_CTX_free(ctx); 321 return (false); 322 } 323 EVP_CIPHER_CTX_free(ctx); 324 return (true); 325 } 326 327 static bool 328 aead_decrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce, 329 const void *aad, size_t aad_len, const char *input, char *output, 330 size_t size, const char *tag, size_t tag_len) 331 { 332 EVP_CIPHER_CTX *ctx; 333 int outl, total; 334 bool valid; 335 336 ctx = EVP_CIPHER_CTX_new(); 337 if 
(ctx == NULL) { 338 warnx("EVP_CIPHER_CTX_new failed: %s", 339 ERR_error_string(ERR_get_error(), NULL)); 340 return (false); 341 } 342 if (EVP_DecryptInit_ex(ctx, cipher, NULL, (const u_char *)key, 343 (const u_char *)nonce) != 1) { 344 warnx("EVP_DecryptInit_ex failed: %s", 345 ERR_error_string(ERR_get_error(), NULL)); 346 EVP_CIPHER_CTX_free(ctx); 347 return (false); 348 } 349 EVP_CIPHER_CTX_set_padding(ctx, 0); 350 if (aad != NULL) { 351 if (EVP_DecryptUpdate(ctx, NULL, &outl, (const u_char *)aad, 352 aad_len) != 1) { 353 warnx("EVP_DecryptUpdate for AAD failed: %s", 354 ERR_error_string(ERR_get_error(), NULL)); 355 EVP_CIPHER_CTX_free(ctx); 356 return (false); 357 } 358 } 359 if (EVP_DecryptUpdate(ctx, (u_char *)output, &outl, 360 (const u_char *)input, size) != 1) { 361 warnx("EVP_DecryptUpdate failed: %s", 362 ERR_error_string(ERR_get_error(), NULL)); 363 EVP_CIPHER_CTX_free(ctx); 364 return (false); 365 } 366 total = outl; 367 if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tag_len, 368 __DECONST(char *, tag)) != 1) { 369 warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed: %s", 370 ERR_error_string(ERR_get_error(), NULL)); 371 EVP_CIPHER_CTX_free(ctx); 372 return (false); 373 } 374 valid = (EVP_DecryptFinal_ex(ctx, (u_char *)output + outl, &outl) == 1); 375 total += outl; 376 if ((size_t)total != size) { 377 warnx("decrypt size mismatch: %zu vs %d", size, total); 378 EVP_CIPHER_CTX_free(ctx); 379 return (false); 380 } 381 if (!valid) 382 warnx("tag mismatch"); 383 EVP_CIPHER_CTX_free(ctx); 384 return (valid); 385 } 386 387 static void 388 build_tls_enable(int cipher_alg, size_t cipher_key_len, int auth_alg, 389 int minor, uint64_t seqno, struct tls_enable *en) 390 { 391 u_int auth_key_len, iv_len; 392 393 memset(en, 0, sizeof(*en)); 394 395 switch (cipher_alg) { 396 case CRYPTO_AES_CBC: 397 if (minor == TLS_MINOR_VER_ZERO) 398 iv_len = AES_BLOCK_LEN; 399 else 400 iv_len = 0; 401 break; 402 case CRYPTO_AES_NIST_GCM_16: 403 if (minor == 
TLS_MINOR_VER_TWO) 404 iv_len = TLS_AEAD_GCM_LEN; 405 else 406 iv_len = TLS_1_3_GCM_IV_LEN; 407 break; 408 case CRYPTO_CHACHA20_POLY1305: 409 iv_len = TLS_CHACHA20_IV_LEN; 410 break; 411 default: 412 iv_len = 0; 413 break; 414 } 415 switch (auth_alg) { 416 case CRYPTO_SHA1_HMAC: 417 auth_key_len = SHA1_HASH_LEN; 418 break; 419 case CRYPTO_SHA2_256_HMAC: 420 auth_key_len = SHA2_256_HASH_LEN; 421 break; 422 case CRYPTO_SHA2_384_HMAC: 423 auth_key_len = SHA2_384_HASH_LEN; 424 break; 425 default: 426 auth_key_len = 0; 427 break; 428 } 429 en->cipher_key = alloc_buffer(cipher_key_len); 430 en->iv = alloc_buffer(iv_len); 431 en->auth_key = alloc_buffer(auth_key_len); 432 en->cipher_algorithm = cipher_alg; 433 en->cipher_key_len = cipher_key_len; 434 en->iv_len = iv_len; 435 en->auth_algorithm = auth_alg; 436 en->auth_key_len = auth_key_len; 437 en->tls_vmajor = TLS_MAJOR_VER_ONE; 438 en->tls_vminor = minor; 439 be64enc(en->rec_seq, seqno); 440 } 441 442 static void 443 free_tls_enable(struct tls_enable *en) 444 { 445 free(__DECONST(void *, en->cipher_key)); 446 free(__DECONST(void *, en->iv)); 447 free(__DECONST(void *, en->auth_key)); 448 } 449 450 static const EVP_CIPHER * 451 tls_EVP_CIPHER(const struct tls_enable *en) 452 { 453 switch (en->cipher_algorithm) { 454 case CRYPTO_AES_CBC: 455 switch (en->cipher_key_len) { 456 case 128 / 8: 457 return (EVP_aes_128_cbc()); 458 case 256 / 8: 459 return (EVP_aes_256_cbc()); 460 default: 461 return (NULL); 462 } 463 break; 464 case CRYPTO_AES_NIST_GCM_16: 465 switch (en->cipher_key_len) { 466 case 128 / 8: 467 return (EVP_aes_128_gcm()); 468 case 256 / 8: 469 return (EVP_aes_256_gcm()); 470 default: 471 return (NULL); 472 } 473 break; 474 case CRYPTO_CHACHA20_POLY1305: 475 return (EVP_chacha20_poly1305()); 476 default: 477 return (NULL); 478 } 479 } 480 481 static const EVP_MD * 482 tls_EVP_MD(const struct tls_enable *en) 483 { 484 switch (en->auth_algorithm) { 485 case CRYPTO_SHA1_HMAC: 486 return (EVP_sha1()); 487 case 
CRYPTO_SHA2_256_HMAC: 488 return (EVP_sha256()); 489 case CRYPTO_SHA2_384_HMAC: 490 return (EVP_sha384()); 491 default: 492 return (NULL); 493 } 494 } 495 496 static size_t 497 tls_header_len(struct tls_enable *en) 498 { 499 size_t len; 500 501 len = sizeof(struct tls_record_layer); 502 switch (en->cipher_algorithm) { 503 case CRYPTO_AES_CBC: 504 if (en->tls_vminor != TLS_MINOR_VER_ZERO) 505 len += AES_BLOCK_LEN; 506 return (len); 507 case CRYPTO_AES_NIST_GCM_16: 508 if (en->tls_vminor == TLS_MINOR_VER_TWO) 509 len += sizeof(uint64_t); 510 return (len); 511 case CRYPTO_CHACHA20_POLY1305: 512 return (len); 513 default: 514 return (0); 515 } 516 } 517 518 static size_t 519 tls_mac_len(struct tls_enable *en) 520 { 521 switch (en->cipher_algorithm) { 522 case CRYPTO_AES_CBC: 523 switch (en->auth_algorithm) { 524 case CRYPTO_SHA1_HMAC: 525 return (SHA1_HASH_LEN); 526 case CRYPTO_SHA2_256_HMAC: 527 return (SHA2_256_HASH_LEN); 528 case CRYPTO_SHA2_384_HMAC: 529 return (SHA2_384_HASH_LEN); 530 default: 531 return (0); 532 } 533 case CRYPTO_AES_NIST_GCM_16: 534 return (AES_GMAC_HASH_LEN); 535 case CRYPTO_CHACHA20_POLY1305: 536 return (POLY1305_HASH_LEN); 537 default: 538 return (0); 539 } 540 } 541 542 /* Includes maximum padding for MTE. */ 543 static size_t 544 tls_trailer_len(struct tls_enable *en) 545 { 546 size_t len; 547 548 len = tls_mac_len(en); 549 if (en->cipher_algorithm == CRYPTO_AES_CBC) 550 len += AES_BLOCK_LEN; 551 if (en->tls_vminor == TLS_MINOR_VER_THREE) 552 len++; 553 return (len); 554 } 555 556 /* 'len' is the length of the payload application data. 
*/ 557 static void 558 tls_mte_aad(struct tls_enable *en, size_t len, 559 const struct tls_record_layer *hdr, uint64_t seqno, struct tls_mac_data *ad) 560 { 561 ad->seq = htobe64(seqno); 562 ad->type = hdr->tls_type; 563 ad->tls_vmajor = hdr->tls_vmajor; 564 ad->tls_vminor = hdr->tls_vminor; 565 ad->tls_length = htons(len); 566 } 567 568 static void 569 tls_12_aead_aad(struct tls_enable *en, size_t len, 570 const struct tls_record_layer *hdr, uint64_t seqno, 571 struct tls_aead_data *ad) 572 { 573 ad->seq = htobe64(seqno); 574 ad->type = hdr->tls_type; 575 ad->tls_vmajor = hdr->tls_vmajor; 576 ad->tls_vminor = hdr->tls_vminor; 577 ad->tls_length = htons(len); 578 } 579 580 static void 581 tls_13_aad(struct tls_enable *en, const struct tls_record_layer *hdr, 582 uint64_t seqno, struct tls_aead_data_13 *ad) 583 { 584 ad->type = hdr->tls_type; 585 ad->tls_vmajor = hdr->tls_vmajor; 586 ad->tls_vminor = hdr->tls_vminor; 587 ad->tls_length = hdr->tls_length; 588 } 589 590 static void 591 tls_12_gcm_nonce(struct tls_enable *en, const struct tls_record_layer *hdr, 592 char *nonce) 593 { 594 memcpy(nonce, en->iv, TLS_AEAD_GCM_LEN); 595 memcpy(nonce + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t)); 596 } 597 598 static void 599 tls_13_nonce(struct tls_enable *en, uint64_t seqno, char *nonce) 600 { 601 static_assert(TLS_1_3_GCM_IV_LEN == TLS_CHACHA20_IV_LEN, 602 "TLS 1.3 nonce length mismatch"); 603 memcpy(nonce, en->iv, TLS_1_3_GCM_IV_LEN); 604 *(uint64_t *)(nonce + 4) ^= htobe64(seqno); 605 } 606 607 /* 608 * Decrypt a TLS record 'len' bytes long at 'src' and store the result at 609 * 'dst'. If the TLS record header length doesn't match or 'dst' doesn't 610 * have sufficient room ('avail'), fail the test. 
611 */ 612 static size_t 613 decrypt_tls_aes_cbc_mte(struct tls_enable *en, uint64_t seqno, const void *src, 614 size_t len, void *dst, size_t avail, uint8_t *record_type) 615 { 616 const struct tls_record_layer *hdr; 617 struct tls_mac_data aad; 618 const char *iv; 619 char *buf; 620 size_t hdr_len, mac_len, payload_len; 621 int padding; 622 623 hdr = src; 624 hdr_len = tls_header_len(en); 625 mac_len = tls_mac_len(en); 626 ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE); 627 ATF_REQUIRE(hdr->tls_vminor == en->tls_vminor); 628 629 /* First, decrypt the outer payload into a temporary buffer. */ 630 payload_len = len - hdr_len; 631 buf = malloc(payload_len); 632 if (en->tls_vminor == TLS_MINOR_VER_ZERO) 633 iv = en->iv; 634 else 635 iv = (void *)(hdr + 1); 636 ATF_REQUIRE(cbc_decrypt(tls_EVP_CIPHER(en), en->cipher_key, iv, 637 (const u_char *)src + hdr_len, buf, payload_len)); 638 639 /* 640 * Copy the last encrypted block to use as the IV for the next 641 * record for TLS 1.0. 642 */ 643 if (en->tls_vminor == TLS_MINOR_VER_ZERO) 644 memcpy(__DECONST(uint8_t *, en->iv), (const u_char *)src + 645 (len - AES_BLOCK_LEN), AES_BLOCK_LEN); 646 647 /* 648 * Verify trailing padding and strip. 649 * 650 * The kernel always generates the smallest amount of padding. 651 */ 652 padding = buf[payload_len - 1] + 1; 653 ATF_REQUIRE(padding > 0 && padding <= AES_BLOCK_LEN); 654 ATF_REQUIRE(payload_len >= mac_len + padding); 655 payload_len -= padding; 656 657 /* Verify HMAC. 
*/ 658 payload_len -= mac_len; 659 tls_mte_aad(en, payload_len, hdr, seqno, &aad); 660 ATF_REQUIRE(verify_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len, 661 &aad, sizeof(aad), buf, payload_len, buf + payload_len)); 662 663 ATF_REQUIRE(payload_len <= avail); 664 memcpy(dst, buf, payload_len); 665 *record_type = hdr->tls_type; 666 return (payload_len); 667 } 668 669 static size_t 670 decrypt_tls_12_aead(struct tls_enable *en, uint64_t seqno, const void *src, 671 size_t len, void *dst, uint8_t *record_type) 672 { 673 const struct tls_record_layer *hdr; 674 struct tls_aead_data aad; 675 char nonce[12]; 676 size_t hdr_len, mac_len, payload_len; 677 678 hdr = src; 679 680 hdr_len = tls_header_len(en); 681 mac_len = tls_mac_len(en); 682 payload_len = len - (hdr_len + mac_len); 683 ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE); 684 ATF_REQUIRE(hdr->tls_vminor == TLS_MINOR_VER_TWO); 685 686 tls_12_aead_aad(en, payload_len, hdr, seqno, &aad); 687 if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 688 tls_12_gcm_nonce(en, hdr, nonce); 689 else 690 tls_13_nonce(en, seqno, nonce); 691 692 ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce, 693 &aad, sizeof(aad), (const char *)src + hdr_len, dst, payload_len, 694 (const char *)src + hdr_len + payload_len, mac_len)); 695 696 *record_type = hdr->tls_type; 697 return (payload_len); 698 } 699 700 static size_t 701 decrypt_tls_13_aead(struct tls_enable *en, uint64_t seqno, const void *src, 702 size_t len, void *dst, uint8_t *record_type) 703 { 704 const struct tls_record_layer *hdr; 705 struct tls_aead_data_13 aad; 706 char nonce[12]; 707 char *buf; 708 size_t hdr_len, mac_len, payload_len; 709 710 hdr = src; 711 712 hdr_len = tls_header_len(en); 713 mac_len = tls_mac_len(en); 714 payload_len = len - (hdr_len + mac_len); 715 ATF_REQUIRE(payload_len >= 1); 716 ATF_REQUIRE(hdr->tls_type == TLS_RLTYPE_APP); 717 ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE); 718 ATF_REQUIRE(hdr->tls_vminor == 
TLS_MINOR_VER_TWO); 719 720 tls_13_aad(en, hdr, seqno, &aad); 721 tls_13_nonce(en, seqno, nonce); 722 723 /* 724 * Have to use a temporary buffer for the output due to the 725 * record type as the last byte of the trailer. 726 */ 727 buf = malloc(payload_len); 728 729 ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce, 730 &aad, sizeof(aad), (const char *)src + hdr_len, buf, payload_len, 731 (const char *)src + hdr_len + payload_len, mac_len)); 732 733 /* Trim record type. */ 734 *record_type = buf[payload_len - 1]; 735 payload_len--; 736 737 memcpy(dst, buf, payload_len); 738 free(buf); 739 740 return (payload_len); 741 } 742 743 static size_t 744 decrypt_tls_aead(struct tls_enable *en, uint64_t seqno, const void *src, 745 size_t len, void *dst, size_t avail, uint8_t *record_type) 746 { 747 const struct tls_record_layer *hdr; 748 size_t payload_len; 749 750 hdr = src; 751 ATF_REQUIRE(ntohs(hdr->tls_length) + sizeof(*hdr) == len); 752 753 payload_len = len - (tls_header_len(en) + tls_trailer_len(en)); 754 ATF_REQUIRE(payload_len <= avail); 755 756 if (en->tls_vminor == TLS_MINOR_VER_TWO) { 757 ATF_REQUIRE(decrypt_tls_12_aead(en, seqno, src, len, dst, 758 record_type) == payload_len); 759 } else { 760 ATF_REQUIRE(decrypt_tls_13_aead(en, seqno, src, len, dst, 761 record_type) == payload_len); 762 } 763 764 return (payload_len); 765 } 766 767 static size_t 768 decrypt_tls_record(struct tls_enable *en, uint64_t seqno, const void *src, 769 size_t len, void *dst, size_t avail, uint8_t *record_type) 770 { 771 if (en->cipher_algorithm == CRYPTO_AES_CBC) 772 return (decrypt_tls_aes_cbc_mte(en, seqno, src, len, dst, avail, 773 record_type)); 774 else 775 return (decrypt_tls_aead(en, seqno, src, len, dst, avail, 776 record_type)); 777 } 778 779 /* 780 * Encrypt a TLS record of type 'record_type' with payload 'len' bytes 781 * long at 'src' and store the result at 'dst'. If 'dst' doesn't have 782 * sufficient room ('avail'), fail the test. 
783 */ 784 static size_t 785 encrypt_tls_12_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno, 786 const void *src, size_t len, void *dst) 787 { 788 struct tls_record_layer *hdr; 789 struct tls_aead_data aad; 790 char nonce[12]; 791 size_t hdr_len, mac_len, record_len; 792 793 hdr = dst; 794 795 hdr_len = tls_header_len(en); 796 mac_len = tls_mac_len(en); 797 record_len = hdr_len + len + mac_len; 798 799 hdr->tls_type = record_type; 800 hdr->tls_vmajor = TLS_MAJOR_VER_ONE; 801 hdr->tls_vminor = TLS_MINOR_VER_TWO; 802 hdr->tls_length = htons(record_len - sizeof(*hdr)); 803 if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 804 memcpy(hdr + 1, &seqno, sizeof(seqno)); 805 806 tls_12_aead_aad(en, len, hdr, seqno, &aad); 807 if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 808 tls_12_gcm_nonce(en, hdr, nonce); 809 else 810 tls_13_nonce(en, seqno, nonce); 811 812 ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce, 813 &aad, sizeof(aad), src, (char *)dst + hdr_len, len, 814 (char *)dst + hdr_len + len, mac_len)); 815 816 return (record_len); 817 } 818 819 static size_t 820 encrypt_tls_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno, 821 const void *src, size_t len, void *dst, size_t avail) 822 { 823 size_t record_len; 824 825 record_len = tls_header_len(en) + len + tls_trailer_len(en); 826 ATF_REQUIRE(record_len <= avail); 827 828 ATF_REQUIRE(encrypt_tls_12_aead(en, record_type, seqno, src, len, 829 dst) == record_len); 830 831 return (record_len); 832 } 833 834 static size_t 835 encrypt_tls_record(struct tls_enable *en, uint8_t record_type, uint64_t seqno, 836 const void *src, size_t len, void *dst, size_t avail) 837 { 838 return (encrypt_tls_aead(en, record_type, seqno, src, len, dst, avail)); 839 } 840 841 static void 842 test_ktls_transmit_app_data(struct tls_enable *en, uint64_t seqno, size_t len) 843 { 844 struct kevent ev; 845 struct tls_record_layer *hdr; 846 char *plaintext, *decrypted, *outbuf; 847 size_t 
decrypted_len, outbuf_len, outbuf_cap, record_len, written; 848 ssize_t rv; 849 int kq, sockets[2]; 850 uint8_t record_type; 851 852 plaintext = alloc_buffer(len); 853 decrypted = malloc(len); 854 outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 + 855 tls_trailer_len(en); 856 outbuf = malloc(outbuf_cap); 857 hdr = (struct tls_record_layer *)outbuf; 858 859 ATF_REQUIRE((kq = kqueue()) != -1); 860 861 ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets"); 862 863 ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en, 864 sizeof(*en)) == 0); 865 866 EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL); 867 ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0); 868 EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL); 869 ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0); 870 871 decrypted_len = 0; 872 outbuf_len = 0; 873 written = 0; 874 875 while (decrypted_len != len) { 876 ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1); 877 878 switch (ev.filter) { 879 case EVFILT_WRITE: 880 /* Try to write any remaining data. */ 881 rv = write(ev.ident, plaintext + written, 882 len - written); 883 ATF_REQUIRE_MSG(rv > 0, 884 "failed to write to socket"); 885 written += rv; 886 if (written == len) { 887 ev.flags = EV_DISABLE; 888 ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, 889 NULL) == 0); 890 } 891 break; 892 893 case EVFILT_READ: 894 ATF_REQUIRE((ev.flags & EV_EOF) == 0); 895 896 /* 897 * Try to read data for the next TLS record 898 * into outbuf. Start by reading the header 899 * to determine how much additional data to 900 * read. 
901 */ 902 if (outbuf_len < sizeof(struct tls_record_layer)) { 903 rv = read(ev.ident, outbuf + outbuf_len, 904 sizeof(struct tls_record_layer) - 905 outbuf_len); 906 ATF_REQUIRE_MSG(rv > 0, 907 "failed to read from socket"); 908 outbuf_len += rv; 909 } 910 911 if (outbuf_len < sizeof(struct tls_record_layer)) 912 break; 913 914 record_len = sizeof(struct tls_record_layer) + 915 ntohs(hdr->tls_length); 916 ATF_REQUIRE(record_len <= outbuf_cap); 917 ATF_REQUIRE(record_len > outbuf_len); 918 rv = read(ev.ident, outbuf + outbuf_len, 919 record_len - outbuf_len); 920 if (rv == -1 && errno == EAGAIN) 921 break; 922 ATF_REQUIRE_MSG(rv > 0, "failed to read from socket"); 923 924 outbuf_len += rv; 925 if (outbuf_len == record_len) { 926 decrypted_len += decrypt_tls_record(en, seqno, 927 outbuf, outbuf_len, 928 decrypted + decrypted_len, 929 len - decrypted_len, &record_type); 930 ATF_REQUIRE(record_type == TLS_RLTYPE_APP); 931 932 seqno++; 933 outbuf_len = 0; 934 } 935 break; 936 } 937 } 938 939 ATF_REQUIRE_MSG(written == decrypted_len, 940 "read %zu decrypted bytes, but wrote %zu", decrypted_len, written); 941 942 ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0); 943 944 free(outbuf); 945 free(decrypted); 946 free(plaintext); 947 948 close(sockets[1]); 949 close(sockets[0]); 950 close(kq); 951 } 952 953 static void 954 ktls_send_control_message(int fd, uint8_t type, void *data, size_t len) 955 { 956 struct msghdr msg; 957 struct cmsghdr *cmsg; 958 char cbuf[CMSG_SPACE(sizeof(type))]; 959 struct iovec iov; 960 961 memset(&msg, 0, sizeof(msg)); 962 963 msg.msg_control = cbuf; 964 msg.msg_controllen = sizeof(cbuf); 965 cmsg = CMSG_FIRSTHDR(&msg); 966 cmsg->cmsg_level = IPPROTO_TCP; 967 cmsg->cmsg_type = TLS_SET_RECORD_TYPE; 968 cmsg->cmsg_len = CMSG_LEN(sizeof(type)); 969 *(uint8_t *)CMSG_DATA(cmsg) = type; 970 971 iov.iov_base = data; 972 iov.iov_len = len; 973 msg.msg_iov = &iov; 974 msg.msg_iovlen = 1; 975 976 ATF_REQUIRE(sendmsg(fd, &msg, 0) == (ssize_t)len); 977 } 
/*
 * Transmit a single TLS record with an explicit record type via
 * sendmsg() and verify that the encrypted record read back from the
 * peer decrypts to the original payload with the requested type.
 */
static void
test_ktls_transmit_control(struct tls_enable *en, uint64_t seqno, uint8_t type,
    size_t len)
{
	struct tls_record_layer *hdr;
	char *plaintext, *decrypted, *outbuf;
	size_t outbuf_cap, payload_len, record_len;
	ssize_t rv;
	int sockets[2];
	uint8_t record_type;

	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);

	plaintext = alloc_buffer(len);
	ATF_REQUIRE(len == 0 || plaintext != NULL);
	decrypted = malloc(len);
	ATF_REQUIRE(len == 0 || decrypted != NULL);
	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	ATF_REQUIRE(outbuf != NULL);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	ktls_send_control_message(sockets[1], type, plaintext, len);

	/*
	 * First read the header to determine how much additional data
	 * to read.
	 */
	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
	payload_len = ntohs(hdr->tls_length);
	record_len = payload_len + sizeof(struct tls_record_layer);
	ATF_REQUIRE(record_len <= outbuf_cap);
	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
	    payload_len);
	ATF_REQUIRE(rv == (ssize_t)payload_len);

	rv = decrypt_tls_record(en, seqno, outbuf, record_len, decrypted, len,
	    &record_type);

	ATF_REQUIRE_MSG((ssize_t)len == rv,
	    "read %zd decrypted bytes, but wrote %zu", rv, len);
	ATF_REQUIRE(record_type == type);

	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);

	free(outbuf);
	free(decrypted);
	free(plaintext);

	close(sockets[1]);
	close(sockets[0]);
}

/*
 * A zero-length write on a TLS 1.0 transmit socket should emit an
 * "empty fragment": a full TLS record whose decrypted payload is zero
 * bytes.  Verify the record decrypts cleanly with type APP.
 */
static void
test_ktls_transmit_empty_fragment(struct tls_enable *en, uint64_t seqno)
{
	struct tls_record_layer *hdr;
	char *outbuf;
	size_t outbuf_cap, payload_len, record_len;
	ssize_t rv;
	int sockets[2];
	uint8_t record_type;

	outbuf_cap = tls_header_len(en) + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	ATF_REQUIRE(outbuf != NULL);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	/* A write of zero bytes should send an empty fragment. */
	rv = write(sockets[1], NULL, 0);
	ATF_REQUIRE(rv == 0);

	/*
	 * First read the header to determine how much additional data
	 * to read.
	 */
	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
	payload_len = ntohs(hdr->tls_length);
	record_len = payload_len + sizeof(struct tls_record_layer);
	ATF_REQUIRE(record_len <= outbuf_cap);
	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
	    payload_len);
	ATF_REQUIRE(rv == (ssize_t)payload_len);

	rv = decrypt_tls_record(en, seqno, outbuf, record_len, NULL, 0,
	    &record_type);

	ATF_REQUIRE_MSG(rv == 0,
	    "read %zd decrypted bytes for an empty fragment", rv);
	ATF_REQUIRE(record_type == TLS_RLTYPE_APP);

	free(outbuf);

	close(sockets[1]);
	close(sockets[0]);
}

/*
 * Receive a single decrypted TLS record from a receive-offload socket
 * via recvmsg() and validate the TLS_GET_RECORD control message
 * (type, protocol version, and length) the kernel attaches to it.
 * Returns the number of payload bytes received.
 */
static size_t
ktls_receive_tls_record(struct tls_enable *en, int fd, uint8_t record_type,
    void *data, size_t len)
{
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct tls_get_record *tgr;
	char cbuf[CMSG_SPACE(sizeof(*tgr))];
	struct iovec iov;
	ssize_t rv;

	memset(&msg, 0, sizeof(msg));

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	iov.iov_base = data;
	iov.iov_len = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	ATF_REQUIRE((rv = recvmsg(fd, &msg, 0)) > 0);

	/* The record must be complete (EOR) and the cmsg untruncated. */
	ATF_REQUIRE((msg.msg_flags & (MSG_EOR | MSG_CTRUNC)) == MSG_EOR);

	cmsg = CMSG_FIRSTHDR(&msg);
	ATF_REQUIRE(cmsg != NULL);
	ATF_REQUIRE(cmsg->cmsg_level == IPPROTO_TCP);
	ATF_REQUIRE(cmsg->cmsg_type == TLS_GET_RECORD);
	ATF_REQUIRE(cmsg->cmsg_len == CMSG_LEN(sizeof(*tgr)));

	tgr = (struct tls_get_record *)CMSG_DATA(cmsg);
	ATF_REQUIRE(tgr->tls_type == record_type);
	ATF_REQUIRE(tgr->tls_vmajor == en->tls_vmajor);
	ATF_REQUIRE(tgr->tls_vminor == en->tls_vminor);
	ATF_REQUIRE(tgr->tls_length == htons(rv));

	return (rv);
}

/*
 * Feed encrypted TLS records into a receive-offload socket and verify
 * that the kernel hands back the original plaintext.  A kqueue drives
 * the loop: EVFILT_WRITE events encrypt and push the next record
 * (records are capped at TLS_MAX_MSG_SIZE_V10_2 payload bytes);
 * EVFILT_READ events pull decrypted data back out.
 */
static void
test_ktls_receive_app_data(struct tls_enable *en, uint64_t seqno, size_t len)
{
	struct kevent ev;
	char *plaintext, *received, *outbuf;
	size_t outbuf_cap, outbuf_len, outbuf_sent, received_len, todo, written;
	ssize_t rv;
	int kq, sockets[2];

	plaintext = alloc_buffer(len);
	ATF_REQUIRE(len == 0 || plaintext != NULL);
	received = malloc(len);
	ATF_REQUIRE(len == 0 || received != NULL);
	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
	    tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	ATF_REQUIRE(outbuf != NULL);

	ATF_REQUIRE((kq = kqueue()) != -1);

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == 0);

	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);

	received_len = 0;
	outbuf_len = 0;
	written = 0;

	while (received_len != len) {
		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);

		switch (ev.filter) {
		case EVFILT_WRITE:
			/*
			 * Compose the next TLS record to send.
			 */
			if (outbuf_len == 0) {
				ATF_REQUIRE(written < len);
				todo = len - written;
				if (todo > TLS_MAX_MSG_SIZE_V10_2)
					todo = TLS_MAX_MSG_SIZE_V10_2;
				outbuf_len = encrypt_tls_record(en,
				    TLS_RLTYPE_APP, seqno, plaintext + written,
				    todo, outbuf, outbuf_cap);
				outbuf_sent = 0;
				written += todo;
				seqno++;
			}

			/*
			 * Try to write the remainder of the current
			 * TLS record.
			 */
			rv = write(ev.ident, outbuf + outbuf_sent,
			    outbuf_len - outbuf_sent);
			ATF_REQUIRE_MSG(rv > 0,
			    "failed to write to socket");
			outbuf_sent += rv;
			if (outbuf_sent == outbuf_len) {
				outbuf_len = 0;
				/* All records sent: stop write events. */
				if (written == len) {
					ev.flags = EV_DISABLE;
					ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
					    NULL) == 0);
				}
			}
			break;

		case EVFILT_READ:
			ATF_REQUIRE((ev.flags & EV_EOF) == 0);

			rv = ktls_receive_tls_record(en, ev.ident,
			    TLS_RLTYPE_APP, received + received_len,
			    len - received_len);
			received_len += rv;
			break;
		}
	}

	ATF_REQUIRE_MSG(written == received_len,
	    "read %zu received bytes, but wrote %zu", received_len, written);

	ATF_REQUIRE(memcmp(plaintext, received, len) == 0);

	free(outbuf);
	free(received);
	free(plaintext);

	close(sockets[1]);
	close(sockets[0]);
	close(kq);
}

#define TLS_10_TESTS(M) \
	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC) \
	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC)

#define TLS_12_TESTS(M) \
	M(aes128_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(aes256_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(chacha20_poly1305_1_2, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_TWO)

#define TLS_13_TESTS(M) \
	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)

#define AES_CBC_TESTS(M) \
	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes128_cbc_1_1_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE) \
	M(aes256_cbc_1_1_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE) \
	M(aes128_cbc_1_2_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO) \
	M(aes128_cbc_1_2_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha256, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO) \
	M(aes128_cbc_1_2_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha384, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO)

#define AES_GCM_TESTS(M) \
	M(aes128_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(aes256_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)

#define CHACHA20_TESTS(M) \
	M(chacha20_poly1305_1_2, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)

#define GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name, len) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name); \
ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_transmit_app_data(&en, seqno, len); \
	free_tls_enable(&en); \
}

#define ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name) \
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);

#define GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name, type, len) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name); \
ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_transmit_control(&en, seqno, type, len); \
	free_tls_enable(&en); \
}

#define ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name) \
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);

#define GEN_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg, \
	    key_size, auth_alg) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_empty_fragment); \
ATF_TC_BODY(ktls_transmit_##cipher_name##_empty_fragment, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, \
	    TLS_MINOR_VER_ZERO, seqno, &en); \
	test_ktls_transmit_empty_fragment(&en, seqno); \
	free_tls_enable(&en); \
}

#define ADD_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg, \
	    key_size, auth_alg) \
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_empty_fragment);

#define GEN_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short, 64) \
	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long, 64 * 1024) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, control, 0x21 /* Alert */, 32)

#define ADD_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short) \
	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, control)

/*
 * For each supported cipher suite, run three transmit tests:
 *
 * - a short test which sends 64 bytes of application data (likely as
 *   a single TLS record)
 *
 * - a long test which sends 64KB of application data (split across
 *   multiple TLS records)
 *
 * - a control test which sends a single record with a specific
 *   content type via sendmsg()
 */
AES_CBC_TESTS(GEN_TRANSMIT_TESTS);
AES_GCM_TESTS(GEN_TRANSMIT_TESTS);
CHACHA20_TESTS(GEN_TRANSMIT_TESTS);

#define GEN_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_1, 0x21 /* Alert */, 1) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_2, 0x21 /* Alert */, 2) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_3, 0x21 /* Alert */, 3) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_4, 0x21 /* Alert */, 4) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_5, 0x21 /* Alert */, 5) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_6, 0x21 /* Alert */, 6) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_7, 0x21 /* Alert */, 7) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_8, 0x21 /* Alert */, 8) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_9, 0x21 /* Alert */, 9) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_10, 0x21 /* Alert */, 10) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_11, 0x21 /* Alert */, 11) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_12, 0x21 /* Alert */, 12) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_13, 0x21 /* Alert */, 13) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_14, 0x21 /* Alert */, 14) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_15, 0x21 /* Alert */, 15) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_16, 0x21 /* Alert */, 16)

#define ADD_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_1) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_2) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_3) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_4) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_5) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_6) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_7) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_8) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_9) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_10) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_11) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_12) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_13) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_14) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_15) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_16)

/*
 * For AES-CBC MTE cipher suites using padding, add tests of messages
 * with each possible padding size.  Note that the padding_<N> tests
 * do not necessarily test <N> bytes of padding as the padding is a
 * function of the cipher suite's MAC length.  However, cycling
 * through all of the payload sizes from 1 to 16 should exercise all
 * of the possible padding lengths for each suite.
 */
AES_CBC_TESTS(GEN_TRANSMIT_PADDING_TESTS);

/*
 * Test "empty fragments" which are TLS records with no payload that
 * OpenSSL can send for TLS 1.0 connections.
 */
TLS_10_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);

/*
 * Verify that enabling transmit offload with an invalid cipher suite
 * fails with EINVAL.
 */
static void
test_ktls_invalid_transmit_cipher_suite(struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == -1);
	ATF_REQUIRE(errno == EINVAL);

	close(sockets[1]);
	close(sockets[0]);
}

#define GEN_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_invalid_##name); \
ATF_TC_BODY(ktls_transmit_invalid_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_invalid_transmit_cipher_suite(&en); \
	free_tls_enable(&en); \
}

#define ADD_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ATF_TP_ADD_TC(tp, ktls_transmit_invalid_##name);

#define INVALID_CIPHER_SUITES(M) \
	M(aes128_cbc_1_0_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes128_cbc_1_0_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes128_gcm_1_0, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_ZERO) \
	M(chacha20_poly1305_1_0, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_ZERO) \
	M(aes128_cbc_1_1_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ONE) \
	M(aes128_cbc_1_1_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ONE) \
	M(aes128_gcm_1_1, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_ONE) \
	M(chacha20_poly1305_1_1, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_ONE) \
	M(aes128_cbc_1_3_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_THREE) \
	M(aes128_cbc_1_3_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_THREE) \
	M(aes128_cbc_1_3_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_THREE)

/*
 * Ensure that invalid cipher suites are rejected for transmit.
 */
INVALID_CIPHER_SUITES(GEN_INVALID_TRANSMIT_TEST);

#define GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name, len) \
ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name); \
ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_receive_app_data(&en, seqno, len); \
	free_tls_enable(&en); \
}

#define ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name) \
	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name);

#define GEN_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short, 64) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long, 64 * 1024)

#define ADD_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long)

/*
 * For each supported cipher suite, run two receive tests:
 *
 * - a short test which sends 64 bytes of application data (likely as
 *   a single TLS record)
 *
 * - a long test which sends 64KB of application data (split across
 *   multiple TLS records)
 *
 * Note that receive is currently only supported for TLS 1.2 AEAD
 * cipher suites.
 */
TLS_12_TESTS(GEN_RECEIVE_TESTS);

/*
 * Verify that enabling receive offload with an invalid cipher suite
 * fails.
 */
static void
test_ktls_invalid_receive_cipher_suite(struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == -1);

	/*
	 * XXX: TLS 1.3 fails with ENOTSUP before checking for invalid
	 * ciphers.
	 */
	ATF_REQUIRE(errno == EINVAL || errno == ENOTSUP);

	close(sockets[1]);
	close(sockets[0]);
}

#define GEN_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
ATF_TC_WITHOUT_HEAD(ktls_receive_invalid_##name); \
ATF_TC_BODY(ktls_receive_invalid_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_invalid_receive_cipher_suite(&en); \
	free_tls_enable(&en); \
}

#define ADD_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ATF_TP_ADD_TC(tp, ktls_receive_invalid_##name);

/*
 * Ensure that invalid cipher suites are rejected for receive.
 */
INVALID_CIPHER_SUITES(GEN_INVALID_RECEIVE_TEST);

/*
 * Verify that enabling receive offload with a valid cipher suite the
 * kernel does not (yet) support for receive fails cleanly.
 */
static void
test_ktls_unsupported_receive_cipher_suite(struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == -1);
	ATF_REQUIRE(errno == EPROTONOSUPPORT || errno == ENOTSUP);

	close(sockets[1]);
	close(sockets[0]);
}

#define GEN_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size, \
	    auth_alg, minor) \
ATF_TC_WITHOUT_HEAD(ktls_receive_unsupported_##name); \
ATF_TC_BODY(ktls_receive_unsupported_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_unsupported_receive_cipher_suite(&en); \
	free_tls_enable(&en); \
}

#define ADD_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size, \
	    auth_alg, minor) \
	ATF_TP_ADD_TC(tp, ktls_receive_unsupported_##name);

/*
 * Ensure that valid cipher suites not supported for receive are
 * rejected.
 */
AES_CBC_TESTS(GEN_UNSUPPORTED_RECEIVE_TEST);
TLS_13_TESTS(GEN_UNSUPPORTED_RECEIVE_TEST);

ATF_TP_ADD_TCS(tp)
{
	/* Transmit tests */
	AES_CBC_TESTS(ADD_TRANSMIT_TESTS);
	AES_GCM_TESTS(ADD_TRANSMIT_TESTS);
	CHACHA20_TESTS(ADD_TRANSMIT_TESTS);
	AES_CBC_TESTS(ADD_TRANSMIT_PADDING_TESTS);
	TLS_10_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
	INVALID_CIPHER_SUITES(ADD_INVALID_TRANSMIT_TEST);

	/* Receive tests */
	AES_CBC_TESTS(ADD_UNSUPPORTED_RECEIVE_TEST);
	TLS_12_TESTS(ADD_RECEIVE_TESTS);
	TLS_13_TESTS(ADD_UNSUPPORTED_RECEIVE_TEST);
	INVALID_CIPHER_SUITES(ADD_INVALID_RECEIVE_TEST);

	return (atf_no_error());
}