1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2021 Netflix Inc. 5 * Written by: John Baldwin <jhb@FreeBSD.org> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/param.h> 30 #include <sys/endian.h> 31 #include <sys/event.h> 32 #include <sys/ktls.h> 33 #include <sys/socket.h> 34 #include <sys/sysctl.h> 35 #include <netinet/in.h> 36 #include <netinet/tcp.h> 37 #include <crypto/cryptodev.h> 38 #include <assert.h> 39 #include <err.h> 40 #include <fcntl.h> 41 #include <libutil.h> 42 #include <netdb.h> 43 #include <poll.h> 44 #include <stdbool.h> 45 #include <stdlib.h> 46 #include <atf-c.h> 47 48 #include <openssl/err.h> 49 #include <openssl/evp.h> 50 #include <openssl/hmac.h> 51 52 static void 53 require_ktls(void) 54 { 55 size_t len; 56 bool enable; 57 58 len = sizeof(enable); 59 if (sysctlbyname("kern.ipc.tls.enable", &enable, &len, NULL, 0) == -1) { 60 if (errno == ENOENT) 61 atf_tc_skip("kernel does not support TLS offload"); 62 atf_libc_error(errno, "Failed to read kern.ipc.tls.enable"); 63 } 64 65 if (!enable) 66 atf_tc_skip("Kernel TLS is disabled"); 67 } 68 69 #define ATF_REQUIRE_KTLS() require_ktls() 70 71 static void 72 check_tls_mode(const atf_tc_t *tc, int s, int sockopt) 73 { 74 if (atf_tc_get_config_var_as_bool_wd(tc, "ktls.require_ifnet", false)) { 75 socklen_t len; 76 int mode; 77 78 len = sizeof(mode); 79 if (getsockopt(s, IPPROTO_TCP, sockopt, &mode, &len) == -1) 80 atf_libc_error(errno, "Failed to fetch TLS mode"); 81 82 if (mode != TCP_TLS_MODE_IFNET) 83 atf_tc_skip("connection did not use ifnet TLS"); 84 } 85 86 if (atf_tc_get_config_var_as_bool_wd(tc, "ktls.require_toe", false)) { 87 socklen_t len; 88 int mode; 89 90 len = sizeof(mode); 91 if (getsockopt(s, IPPROTO_TCP, sockopt, &mode, &len) == -1) 92 atf_libc_error(errno, "Failed to fetch TLS mode"); 93 94 if (mode != TCP_TLS_MODE_TOE) 95 atf_tc_skip("connection did not use TOE TLS"); 96 } 97 } 98 99 static void __printflike(2, 3) 100 debug(const atf_tc_t *tc, const char *fmt, ...) 101 { 102 if (!atf_tc_get_config_var_as_bool_wd(tc, "ktls.debug", false)) 103 return; 104 105 va_list ap; 106 va_start(ap, fmt); 107 vprintf(fmt, ap); 108 va_end(ap); 109 } 110 111 static void 112 debug_hexdump(const atf_tc_t *tc, const void *buf, int length, 113 const char *label) 114 { 115 if (!atf_tc_get_config_var_as_bool_wd(tc, "ktls.debug", false)) 116 return; 117 118 if (label != NULL) 119 printf("%s:\n", label); 120 hexdump(buf, length, NULL, 0); 121 } 122 123 static char 124 rdigit(void) 125 { 126 /* ASCII printable values between 0x20 and 0x7e */ 127 return (0x20 + random() % (0x7f - 0x20)); 128 } 129 130 static char * 131 alloc_buffer(size_t len) 132 { 133 char *buf; 134 size_t i; 135 136 if (len == 0) 137 return (NULL); 138 buf = malloc(len); 139 for (i = 0; i < len; i++) 140 buf[i] = rdigit(); 141 return (buf); 142 } 143 144 static bool 145 socketpair_tcp(int sv[2]) 146 { 147 struct pollfd pfd; 148 struct sockaddr_in sin; 149 socklen_t len; 150 int as, cs, ls; 151 152 ls = socket(PF_INET, SOCK_STREAM, 0); 153 if (ls == -1) { 154 warn("socket() for listen"); 155 return (false); 156 } 157 158 memset(&sin, 0, sizeof(sin)); 159 sin.sin_len = sizeof(sin); 160 sin.sin_family = AF_INET; 161 sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); 162 if (bind(ls, (struct sockaddr *)&sin, sizeof(sin)) == -1) { 163 warn("bind"); 164 close(ls); 165 return (false); 166 } 167 168 if (listen(ls, 1) == -1) { 169 warn("listen"); 170 close(ls); 171 return (false); 172 } 173 174 len = sizeof(sin); 175 if (getsockname(ls, (struct sockaddr *)&sin, &len) == -1) { 176 warn("getsockname"); 177 close(ls); 178 return (false); 179 } 180 181 cs = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0); 182 if (cs == -1) { 183 warn("socket() for connect"); 184 close(ls); 185 return (false); 186 } 187 188 if (connect(cs, (struct sockaddr *)&sin, sizeof(sin)) == -1) { 189 if (errno != EINPROGRESS) { 190 warn("connect"); 191 close(ls); 192 close(cs); 193 return (false); 194 } 195 } 196 197 as = accept4(ls, NULL, NULL, SOCK_NONBLOCK); 198 if (as == -1) { 199 warn("accept4"); 200 close(ls); 201 close(cs); 202 return (false); 203 } 204 205 close(ls); 206 207 pfd.fd = cs; 208 pfd.events = POLLOUT; 209 pfd.revents = 0; 210 ATF_REQUIRE_INTEQ(1, poll(&pfd, 1, INFTIM)); 211 ATF_REQUIRE_INTEQ(POLLOUT, pfd.revents); 212 213 sv[0] = cs; 214 sv[1] = as; 215 return (true); 216 } 217 218 static bool 219 echo_socket(const atf_tc_t *tc, int sv[2]) 220 { 221 const char *cause, *host, *port; 222 struct addrinfo hints, *ai, *tofree; 223 int error, flags, s; 224 225 host = atf_tc_get_config_var(tc, "ktls.host"); 226 port = atf_tc_get_config_var_wd(tc, "ktls.port", "echo"); 227 memset(&hints, 0, sizeof(hints)); 228 hints.ai_family = AF_UNSPEC; 229 hints.ai_socktype = SOCK_STREAM; 230 hints.ai_protocol = IPPROTO_TCP; 231 error = getaddrinfo(host, port, &hints, &tofree); 232 if (error != 0) { 233 warnx("getaddrinfo(%s:%s) failed: %s", host, port, 234 gai_strerror(error)); 235 return (false); 236 } 237 238 cause = NULL; 239 for (ai = tofree; ai != NULL; ai = ai->ai_next) { 240 s = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol); 241 if (s == -1) { 242 cause = "socket"; 243 error = errno; 244 continue; 245 } 246 247 if (connect(s, ai->ai_addr, ai->ai_addrlen) == -1) { 248 cause = "connect"; 249 error = errno; 250 close(s); 251 continue; 252 } 253 254 freeaddrinfo(tofree); 255 256 ATF_REQUIRE((flags = fcntl(s, F_GETFL)) != -1); 257 flags |= O_NONBLOCK; 258 ATF_REQUIRE(fcntl(s, F_SETFL, flags) != -1); 259 260 sv[0] = s; 261 sv[1] = s; 262 return (true); 263 } 264 265 warnc(error, "%s", cause); 266 freeaddrinfo(tofree); 267 return (false); 268 } 269 270 static bool 271 open_sockets(const atf_tc_t *tc, int sv[2]) 272 { 273 if (atf_tc_has_config_var(tc, "ktls.host")) 274 return (echo_socket(tc, sv)); 275 else 276 return (socketpair_tcp(sv)); 277 } 278 279 static void 280 close_sockets(int sv[2]) 281 { 282 if (sv[0] != sv[1]) 283 ATF_REQUIRE(close(sv[1]) == 0); 284 ATF_REQUIRE(close(sv[0]) == 0); 285 } 286 287 static void 288 close_sockets_ignore_errors(int sv[2]) 289 { 290 if (sv[0] != sv[1]) 291 close(sv[1]); 292 close(sv[0]); 293 } 294 295 static void 296 fd_set_blocking(int fd) 297 { 298 int flags; 299 300 ATF_REQUIRE((flags = fcntl(fd, F_GETFL)) != -1); 301 flags &= ~O_NONBLOCK; 302 ATF_REQUIRE(fcntl(fd, F_SETFL, flags) != -1); 303 } 304 305 static bool 306 cbc_crypt(const EVP_CIPHER *cipher, const char *key, const char *iv, 307 const char *input, char *output, size_t size, int enc) 308 { 309 EVP_CIPHER_CTX *ctx; 310 int outl, total; 311 312 ctx = EVP_CIPHER_CTX_new(); 313 if (ctx == NULL) { 314 warnx("EVP_CIPHER_CTX_new failed: %s", 315 ERR_error_string(ERR_get_error(), NULL)); 316 return (false); 317 } 318 if (EVP_CipherInit_ex(ctx, cipher, NULL, (const u_char *)key, 319 (const u_char *)iv, enc) != 1) { 320 warnx("EVP_CipherInit_ex failed: %s", 321 ERR_error_string(ERR_get_error(), NULL)); 322 EVP_CIPHER_CTX_free(ctx); 323 return (false); 324 } 325 EVP_CIPHER_CTX_set_padding(ctx, 0); 326 if (EVP_CipherUpdate(ctx, (u_char *)output, &outl, 327 (const u_char *)input, size) != 1) { 328 warnx("EVP_CipherUpdate failed: %s", 329 ERR_error_string(ERR_get_error(), NULL)); 330 EVP_CIPHER_CTX_free(ctx); 331 return (false); 332 } 333 total = outl; 334 if (EVP_CipherFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) { 335 warnx("EVP_CipherFinal_ex failed: %s", 336 ERR_error_string(ERR_get_error(), NULL)); 337 EVP_CIPHER_CTX_free(ctx); 338 return (false); 339 } 340 total += outl; 341 if ((size_t)total != size) { 342 warnx("decrypt size mismatch: %zu vs %d", size, total); 343 EVP_CIPHER_CTX_free(ctx); 344 return (false); 345 } 346 EVP_CIPHER_CTX_free(ctx); 347 return (true); 348 } 349 350 static bool 351 cbc_encrypt(const EVP_CIPHER *cipher, const char *key, const char *iv, 352 const char *input, char *output, size_t size) 353 { 354 return (cbc_crypt(cipher, key, iv, input, output, size, 1)); 355 } 356 357 static bool 358 cbc_decrypt(const EVP_CIPHER *cipher, const char *key, const char *iv, 359 const char *input, char *output, size_t size) 360 { 361 return (cbc_crypt(cipher, key, iv, input, output, size, 0)); 362 } 363 364 static bool 365 compute_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad, 366 size_t aad_len, const void *buffer, size_t len, void *digest, 367 u_int *digest_len) 368 { 369 HMAC_CTX *ctx; 370 371 ctx = HMAC_CTX_new(); 372 if (ctx == NULL) { 373 warnx("HMAC_CTX_new failed: %s", 374 ERR_error_string(ERR_get_error(), NULL)); 375 return (false); 376 } 377 if (HMAC_Init_ex(ctx, key, key_len, md, NULL) != 1) { 378 warnx("HMAC_Init_ex failed: %s", 379 ERR_error_string(ERR_get_error(), NULL)); 380 HMAC_CTX_free(ctx); 381 return (false); 382 } 383 if (HMAC_Update(ctx, aad, aad_len) != 1) { 384 warnx("HMAC_Update (aad) failed: %s", 385 ERR_error_string(ERR_get_error(), NULL)); 386 HMAC_CTX_free(ctx); 387 return (false); 388 } 389 if (HMAC_Update(ctx, buffer, len) != 1) { 390 warnx("HMAC_Update (payload) failed: %s", 391 ERR_error_string(ERR_get_error(), NULL)); 392 HMAC_CTX_free(ctx); 393 return (false); 394 } 395 if (HMAC_Final(ctx, digest, digest_len) != 1) { 396 warnx("HMAC_Final failed: %s", 397 ERR_error_string(ERR_get_error(), NULL)); 398 HMAC_CTX_free(ctx); 399 return (false); 400 } 401 HMAC_CTX_free(ctx); 402 return (true); 403 } 404 405 static bool 406 verify_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad, 407 size_t aad_len, const void *buffer, size_t len, const void *digest) 408 { 409 unsigned char digest2[EVP_MAX_MD_SIZE]; 410 u_int digest_len; 411 412 if (!compute_hash(md, key, key_len, aad, aad_len, buffer, len, digest2, 413 &digest_len)) 414 return (false); 415 if (memcmp(digest, digest2, digest_len) != 0) { 416 warnx("HMAC mismatch"); 417 return (false); 418 } 419 return (true); 420 } 421 422 static bool 423 aead_encrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce, 424 const void *aad, size_t aad_len, const char *input, char *output, 425 size_t size, char *tag, size_t tag_len) 426 { 427 EVP_CIPHER_CTX *ctx; 428 int outl, total; 429 430 ctx = EVP_CIPHER_CTX_new(); 431 if (ctx == NULL) { 432 warnx("EVP_CIPHER_CTX_new failed: %s", 433 ERR_error_string(ERR_get_error(), NULL)); 434 return (false); 435 } 436 if (EVP_EncryptInit_ex(ctx, cipher, NULL, (const u_char *)key, 437 (const u_char *)nonce) != 1) { 438 warnx("EVP_EncryptInit_ex failed: %s", 439 ERR_error_string(ERR_get_error(), NULL)); 440 EVP_CIPHER_CTX_free(ctx); 441 return (false); 442 } 443 EVP_CIPHER_CTX_set_padding(ctx, 0); 444 if (aad != NULL) { 445 if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)aad, 446 aad_len) != 1) { 447 warnx("EVP_EncryptUpdate for AAD failed: %s", 448 ERR_error_string(ERR_get_error(), NULL)); 449 EVP_CIPHER_CTX_free(ctx); 450 return (false); 451 } 452 } 453 if (EVP_EncryptUpdate(ctx, (u_char *)output, &outl, 454 (const u_char *)input, size) != 1) { 455 warnx("EVP_EncryptUpdate failed: %s", 456 ERR_error_string(ERR_get_error(), NULL)); 457 EVP_CIPHER_CTX_free(ctx); 458 return (false); 459 } 460 total = outl; 461 if (EVP_EncryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) { 462 warnx("EVP_EncryptFinal_ex failed: %s", 463 ERR_error_string(ERR_get_error(), NULL)); 464 EVP_CIPHER_CTX_free(ctx); 465 return (false); 466 } 467 total += outl; 468 if ((size_t)total != size) { 469 warnx("encrypt size mismatch: %zu vs %d", size, total); 470 EVP_CIPHER_CTX_free(ctx); 471 return (false); 472 } 473 if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tag_len, tag) != 474 1) { 475 warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed: %s", 476 ERR_error_string(ERR_get_error(), NULL)); 477 EVP_CIPHER_CTX_free(ctx); 478 return (false); 479 } 480 EVP_CIPHER_CTX_free(ctx); 481 return (true); 482 } 483 484 static bool 485 aead_decrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce, 486 const void *aad, size_t aad_len, const char *input, char *output, 487 size_t size, const char *tag, size_t tag_len) 488 { 489 EVP_CIPHER_CTX *ctx; 490 int outl, total; 491 bool valid; 492 493 ctx = EVP_CIPHER_CTX_new(); 494 if (ctx == NULL) { 495 warnx("EVP_CIPHER_CTX_new failed: %s", 496 ERR_error_string(ERR_get_error(), NULL)); 497 return (false); 498 } 499 if (EVP_DecryptInit_ex(ctx, cipher, NULL, (const u_char *)key, 500 (const u_char *)nonce) != 1) { 501 warnx("EVP_DecryptInit_ex failed: %s", 502 ERR_error_string(ERR_get_error(), NULL)); 503 EVP_CIPHER_CTX_free(ctx); 504 return (false); 505 } 506 EVP_CIPHER_CTX_set_padding(ctx, 0); 507 if (aad != NULL) { 508 if (EVP_DecryptUpdate(ctx, NULL, &outl, (const u_char *)aad, 509 aad_len) != 1) { 510 warnx("EVP_DecryptUpdate for AAD failed: %s", 511 ERR_error_string(ERR_get_error(), NULL)); 512 EVP_CIPHER_CTX_free(ctx); 513 return (false); 514 } 515 } 516 if (EVP_DecryptUpdate(ctx, (u_char *)output, &outl, 517 (const u_char *)input, size) != 1) { 518 warnx("EVP_DecryptUpdate failed: %s", 519 ERR_error_string(ERR_get_error(), NULL)); 520 EVP_CIPHER_CTX_free(ctx); 521 return (false); 522 } 523 total = outl; 524 if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tag_len, 525 __DECONST(char *, tag)) != 1) { 526 warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed: %s", 527 ERR_error_string(ERR_get_error(), NULL)); 528 EVP_CIPHER_CTX_free(ctx); 529 return (false); 530 } 531 valid = (EVP_DecryptFinal_ex(ctx, (u_char *)output + outl, &outl) == 1); 532 total += outl; 533 if ((size_t)total != size) { 534 warnx("decrypt size mismatch: %zu vs %d", size, total); 535 EVP_CIPHER_CTX_free(ctx); 536 return (false); 537 } 538 if (!valid) 539 warnx("tag mismatch"); 540 EVP_CIPHER_CTX_free(ctx); 541 return (valid); 542 } 543 544 static void 545 build_tls_enable(const atf_tc_t *tc, int cipher_alg, size_t cipher_key_len, 546 int auth_alg, int minor, uint64_t seqno, struct tls_enable *en) 547 { 548 u_int auth_key_len, iv_len; 549 550 memset(en, 0, sizeof(*en)); 551 552 switch (cipher_alg) { 553 case CRYPTO_AES_CBC: 554 if (minor == TLS_MINOR_VER_ZERO) 555 iv_len = AES_BLOCK_LEN; 556 else 557 iv_len = 0; 558 break; 559 case CRYPTO_AES_NIST_GCM_16: 560 if (minor == TLS_MINOR_VER_TWO) 561 iv_len = TLS_AEAD_GCM_LEN; 562 else 563 iv_len = TLS_1_3_GCM_IV_LEN; 564 break; 565 case CRYPTO_CHACHA20_POLY1305: 566 iv_len = TLS_CHACHA20_IV_LEN; 567 break; 568 default: 569 iv_len = 0; 570 break; 571 } 572 switch (auth_alg) { 573 case CRYPTO_SHA1_HMAC: 574 auth_key_len = SHA1_HASH_LEN; 575 break; 576 case CRYPTO_SHA2_256_HMAC: 577 auth_key_len = SHA2_256_HASH_LEN; 578 break; 579 case CRYPTO_SHA2_384_HMAC: 580 auth_key_len = SHA2_384_HASH_LEN; 581 break; 582 default: 583 auth_key_len = 0; 584 break; 585 } 586 en->cipher_key = alloc_buffer(cipher_key_len); 587 debug_hexdump(tc, en->cipher_key, cipher_key_len, "cipher key"); 588 en->iv = alloc_buffer(iv_len); 589 if (iv_len != 0) 590 debug_hexdump(tc, en->iv, iv_len, "iv"); 591 en->auth_key = alloc_buffer(auth_key_len); 592 if (auth_key_len != 0) 593 debug_hexdump(tc, en->auth_key, auth_key_len, "auth key"); 594 en->cipher_algorithm = cipher_alg; 595 en->cipher_key_len = cipher_key_len; 596 en->iv_len = iv_len; 597 en->auth_algorithm = auth_alg; 598 en->auth_key_len = auth_key_len; 599 en->tls_vmajor = TLS_MAJOR_VER_ONE; 600 en->tls_vminor = minor; 601 be64enc(en->rec_seq, seqno); 602 debug(tc, "seqno: %ju\n", (uintmax_t)seqno); 603 } 604 605 static void 606 free_tls_enable(struct tls_enable *en) 607 { 608 free(__DECONST(void *, en->cipher_key)); 609 free(__DECONST(void *, en->iv)); 610 free(__DECONST(void *, en->auth_key)); 611 } 612 613 static const EVP_CIPHER * 614 tls_EVP_CIPHER(const struct tls_enable *en) 615 { 616 switch (en->cipher_algorithm) { 617 case CRYPTO_AES_CBC: 618 switch (en->cipher_key_len) { 619 case 128 / 8: 620 return (EVP_aes_128_cbc()); 621 case 256 / 8: 622 return (EVP_aes_256_cbc()); 623 default: 624 return (NULL); 625 } 626 break; 627 case CRYPTO_AES_NIST_GCM_16: 628 switch (en->cipher_key_len) { 629 case 128 / 8: 630 return (EVP_aes_128_gcm()); 631 case 256 / 8: 632 return (EVP_aes_256_gcm()); 633 default: 634 return (NULL); 635 } 636 break; 637 case CRYPTO_CHACHA20_POLY1305: 638 return (EVP_chacha20_poly1305()); 639 default: 640 return (NULL); 641 } 642 } 643 644 static const EVP_MD * 645 tls_EVP_MD(const struct tls_enable *en) 646 { 647 switch (en->auth_algorithm) { 648 case CRYPTO_SHA1_HMAC: 649 return (EVP_sha1()); 650 case CRYPTO_SHA2_256_HMAC: 651 return (EVP_sha256()); 652 case CRYPTO_SHA2_384_HMAC: 653 return (EVP_sha384()); 654 default: 655 return (NULL); 656 } 657 } 658 659 static size_t 660 tls_header_len(struct tls_enable *en) 661 { 662 size_t len; 663 664 len = sizeof(struct tls_record_layer); 665 switch (en->cipher_algorithm) { 666 case CRYPTO_AES_CBC: 667 if (en->tls_vminor != TLS_MINOR_VER_ZERO) 668 len += AES_BLOCK_LEN; 669 return (len); 670 case CRYPTO_AES_NIST_GCM_16: 671 if (en->tls_vminor == TLS_MINOR_VER_TWO) 672 len += sizeof(uint64_t); 673 return (len); 674 case CRYPTO_CHACHA20_POLY1305: 675 return (len); 676 default: 677 return (0); 678 } 679 } 680 681 static size_t 682 tls_mac_len(struct tls_enable *en) 683 { 684 switch (en->cipher_algorithm) { 685 case CRYPTO_AES_CBC: 686 switch (en->auth_algorithm) { 687 case CRYPTO_SHA1_HMAC: 688 return (SHA1_HASH_LEN); 689 case CRYPTO_SHA2_256_HMAC: 690 return (SHA2_256_HASH_LEN); 691 case CRYPTO_SHA2_384_HMAC: 692 return (SHA2_384_HASH_LEN); 693 default: 694 return (0); 695 } 696 case CRYPTO_AES_NIST_GCM_16: 697 return (AES_GMAC_HASH_LEN); 698 case CRYPTO_CHACHA20_POLY1305: 699 return (POLY1305_HASH_LEN); 700 default: 701 return (0); 702 } 703 } 704 705 /* Includes maximum padding for MTE. */ 706 static size_t 707 tls_trailer_len(struct tls_enable *en) 708 { 709 size_t len; 710 711 len = tls_mac_len(en); 712 if (en->cipher_algorithm == CRYPTO_AES_CBC) 713 len += AES_BLOCK_LEN; 714 if (en->tls_vminor == TLS_MINOR_VER_THREE) 715 len++; 716 return (len); 717 } 718 719 /* Minimum valid record payload size for a given cipher suite. */ 720 static size_t 721 tls_minimum_record_payload(struct tls_enable *en) 722 { 723 size_t len; 724 725 len = tls_header_len(en); 726 if (en->cipher_algorithm == CRYPTO_AES_CBC) 727 len += roundup2(tls_mac_len(en) + 1, AES_BLOCK_LEN); 728 else 729 len += tls_mac_len(en); 730 if (en->tls_vminor == TLS_MINOR_VER_THREE) 731 len++; 732 return (len - sizeof(struct tls_record_layer)); 733 } 734 735 /* 'len' is the length of the payload application data. */ 736 static void 737 tls_mte_aad(struct tls_enable *en, size_t len, 738 const struct tls_record_layer *hdr, uint64_t seqno, struct tls_mac_data *ad) 739 { 740 ad->seq = htobe64(seqno); 741 ad->type = hdr->tls_type; 742 ad->tls_vmajor = hdr->tls_vmajor; 743 ad->tls_vminor = hdr->tls_vminor; 744 ad->tls_length = htons(len); 745 } 746 747 static void 748 tls_12_aead_aad(struct tls_enable *en, size_t len, 749 const struct tls_record_layer *hdr, uint64_t seqno, 750 struct tls_aead_data *ad) 751 { 752 ad->seq = htobe64(seqno); 753 ad->type = hdr->tls_type; 754 ad->tls_vmajor = hdr->tls_vmajor; 755 ad->tls_vminor = hdr->tls_vminor; 756 ad->tls_length = htons(len); 757 } 758 759 static void 760 tls_13_aad(struct tls_enable *en, const struct tls_record_layer *hdr, 761 uint64_t seqno, struct tls_aead_data_13 *ad) 762 { 763 ad->type = hdr->tls_type; 764 ad->tls_vmajor = hdr->tls_vmajor; 765 ad->tls_vminor = hdr->tls_vminor; 766 ad->tls_length = hdr->tls_length; 767 } 768 769 static void 770 tls_12_gcm_nonce(struct tls_enable *en, const struct tls_record_layer *hdr, 771 char *nonce) 772 { 773 memcpy(nonce, en->iv, TLS_AEAD_GCM_LEN); 774 memcpy(nonce + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t)); 775 } 776 777 static void 778 tls_13_nonce(struct tls_enable *en, uint64_t seqno, char *nonce) 779 { 780 static_assert(TLS_1_3_GCM_IV_LEN == TLS_CHACHA20_IV_LEN, 781 "TLS 1.3 nonce length mismatch"); 782 memcpy(nonce, en->iv, TLS_1_3_GCM_IV_LEN); 783 *(uint64_t *)(nonce + 4) ^= htobe64(seqno); 784 } 785 786 /* 787 * Decrypt a TLS record 'len' bytes long at 'src' and store the result at 788 * 'dst'. If the TLS record header length doesn't match or 'dst' doesn't 789 * have sufficient room ('avail'), fail the test. 790 */ 791 static size_t 792 decrypt_tls_aes_cbc_mte(const atf_tc_t *tc, struct tls_enable *en, 793 uint64_t seqno, const void *src, size_t len, void *dst, size_t avail, 794 uint8_t *record_type) 795 { 796 const struct tls_record_layer *hdr; 797 struct tls_mac_data aad; 798 const char *iv; 799 char *buf; 800 size_t hdr_len, mac_len, payload_len; 801 int padding; 802 803 hdr = src; 804 hdr_len = tls_header_len(en); 805 mac_len = tls_mac_len(en); 806 ATF_REQUIRE_INTEQ(TLS_MAJOR_VER_ONE, hdr->tls_vmajor); 807 ATF_REQUIRE_INTEQ(en->tls_vminor, hdr->tls_vminor); 808 debug(tc, "decrypting MTE record seqno %ju:\n", (uintmax_t)seqno); 809 debug_hexdump(tc, src, len, NULL); 810 811 /* First, decrypt the outer payload into a temporary buffer. */ 812 payload_len = len - hdr_len; 813 buf = malloc(payload_len); 814 if (en->tls_vminor == TLS_MINOR_VER_ZERO) 815 iv = en->iv; 816 else 817 iv = (void *)(hdr + 1); 818 debug_hexdump(tc, iv, AES_BLOCK_LEN, "iv"); 819 ATF_REQUIRE(cbc_decrypt(tls_EVP_CIPHER(en), en->cipher_key, iv, 820 (const u_char *)src + hdr_len, buf, payload_len)); 821 debug_hexdump(tc, buf, payload_len, "decrypted buffer"); 822 823 /* 824 * Copy the last encrypted block to use as the IV for the next 825 * record for TLS 1.0. 826 */ 827 if (en->tls_vminor == TLS_MINOR_VER_ZERO) 828 memcpy(__DECONST(uint8_t *, en->iv), (const u_char *)src + 829 (len - AES_BLOCK_LEN), AES_BLOCK_LEN); 830 831 /* 832 * Verify trailing padding and strip. 833 * 834 * The kernel always generates the smallest amount of padding. 835 */ 836 padding = buf[payload_len - 1] + 1; 837 ATF_REQUIRE_MSG(padding > 0 && padding <= AES_BLOCK_LEN, 838 "invalid padding %d", padding); 839 ATF_REQUIRE_MSG(payload_len >= mac_len + padding, 840 "payload_len (%zu) < mac_len (%zu) + padding (%d)", payload_len, 841 mac_len, padding); 842 payload_len -= padding; 843 844 /* Verify HMAC. */ 845 payload_len -= mac_len; 846 tls_mte_aad(en, payload_len, hdr, seqno, &aad); 847 debug_hexdump(tc, &aad, sizeof(aad), "aad"); 848 ATF_REQUIRE(verify_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len, 849 &aad, sizeof(aad), buf, payload_len, buf + payload_len)); 850 851 ATF_REQUIRE_MSG(payload_len <= avail, "payload_len (%zu) < avail (%zu)", 852 payload_len, avail); 853 memcpy(dst, buf, payload_len); 854 *record_type = hdr->tls_type; 855 return (payload_len); 856 } 857 858 static size_t 859 decrypt_tls_12_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno, 860 const void *src, size_t len, void *dst, uint8_t *record_type) 861 { 862 const struct tls_record_layer *hdr; 863 struct tls_aead_data aad; 864 char nonce[12]; 865 size_t hdr_len, mac_len, payload_len; 866 867 hdr = src; 868 869 hdr_len = tls_header_len(en); 870 mac_len = tls_mac_len(en); 871 payload_len = len - (hdr_len + mac_len); 872 ATF_REQUIRE_INTEQ(TLS_MAJOR_VER_ONE, hdr->tls_vmajor); 873 ATF_REQUIRE_INTEQ(TLS_MINOR_VER_TWO, hdr->tls_vminor); 874 debug(tc, "decrypting TLS 1.2 record seqno %ju:\n", (uintmax_t)seqno); 875 debug_hexdump(tc, src, len, NULL); 876 877 tls_12_aead_aad(en, payload_len, hdr, seqno, &aad); 878 debug_hexdump(tc, &aad, sizeof(aad), "aad"); 879 if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 880 tls_12_gcm_nonce(en, hdr, nonce); 881 else 882 tls_13_nonce(en, seqno, nonce); 883 debug_hexdump(tc, nonce, sizeof(nonce), "nonce"); 884 885 ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce, 886 &aad, sizeof(aad), (const char *)src + hdr_len, dst, payload_len, 887 (const char *)src + hdr_len + payload_len, mac_len)); 888 889 *record_type = hdr->tls_type; 890 return (payload_len); 891 } 892 893 static size_t 894 decrypt_tls_13_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno, 895 const void *src, size_t len, void *dst, uint8_t *record_type) 896 { 897 const struct tls_record_layer *hdr; 898 struct tls_aead_data_13 aad; 899 char nonce[12]; 900 char *buf; 901 size_t hdr_len, mac_len, payload_len; 902 903 hdr = src; 904 905 hdr_len = tls_header_len(en); 906 mac_len = tls_mac_len(en); 907 payload_len = len - (hdr_len + mac_len); 908 ATF_REQUIRE_MSG(payload_len >= 1, 909 "payload_len (%zu) too short: len %zu hdr_len %zu mac_len %zu", 910 payload_len, len, hdr_len, mac_len); 911 ATF_REQUIRE_INTEQ(TLS_RLTYPE_APP, hdr->tls_type); 912 ATF_REQUIRE_INTEQ(TLS_MAJOR_VER_ONE, hdr->tls_vmajor); 913 ATF_REQUIRE_INTEQ(TLS_MINOR_VER_TWO, hdr->tls_vminor); 914 debug(tc, "decrypting TLS 1.3 record seqno %ju:\n", (uintmax_t)seqno); 915 debug_hexdump(tc, src, len, NULL); 916 917 tls_13_aad(en, hdr, seqno, &aad); 918 debug_hexdump(tc, &aad, sizeof(aad), "aad"); 919 tls_13_nonce(en, seqno, nonce); 920 debug_hexdump(tc, nonce, sizeof(nonce), "nonce"); 921 922 /* 923 * Have to use a temporary buffer for the output due to the 924 * record type as the last byte of the trailer. 925 */ 926 buf = malloc(payload_len); 927 928 ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce, 929 &aad, sizeof(aad), (const char *)src + hdr_len, buf, payload_len, 930 (const char *)src + hdr_len + payload_len, mac_len)); 931 debug_hexdump(tc, buf, payload_len, "decrypted buffer"); 932 933 /* Trim record type. */ 934 *record_type = buf[payload_len - 1]; 935 payload_len--; 936 937 memcpy(dst, buf, payload_len); 938 free(buf); 939 940 return (payload_len); 941 } 942 943 static size_t 944 decrypt_tls_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno, 945 const void *src, size_t len, void *dst, size_t avail, uint8_t *record_type) 946 { 947 const struct tls_record_layer *hdr; 948 size_t payload_len; 949 950 hdr = src; 951 ATF_REQUIRE_INTEQ(len, ntohs(hdr->tls_length) + sizeof(*hdr)); 952 953 payload_len = len - (tls_header_len(en) + tls_trailer_len(en)); 954 ATF_REQUIRE_MSG(payload_len <= avail, "payload_len (%zu) > avail (%zu)", 955 payload_len, avail); 956 957 if (en->tls_vminor == TLS_MINOR_VER_TWO) { 958 ATF_REQUIRE_INTEQ(payload_len, decrypt_tls_12_aead(tc, en, 959 seqno, src, len, dst, record_type)); 960 } else { 961 ATF_REQUIRE_INTEQ(payload_len, decrypt_tls_13_aead(tc, en, 962 seqno, src, len, dst, record_type)); 963 } 964 965 return (payload_len); 966 } 967 968 static size_t 969 decrypt_tls_record(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno, 970 const void *src, size_t len, void *dst, size_t avail, uint8_t *record_type) 971 { 972 if (en->cipher_algorithm == CRYPTO_AES_CBC) 973 return (decrypt_tls_aes_cbc_mte(tc, en, seqno, src, len, dst, 974 avail, record_type)); 975 else 976 return (decrypt_tls_aead(tc, en, seqno, src, len, dst, avail, 977 record_type)); 978 } 979 980 /* 981 * Encrypt a TLS record of type 'record_type' with payload 'len' bytes 982 * long at 'src' and store the result at 'dst'. If 'dst' doesn't have 983 * sufficient room ('avail'), fail the test. 'padding' is the amount 984 * of additional padding to include beyond any amount mandated by the 985 * cipher suite. 986 */ 987 static size_t 988 encrypt_tls_aes_cbc_mte(const atf_tc_t *tc, struct tls_enable *en, 989 uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst, 990 size_t avail, size_t padding) 991 { 992 struct tls_record_layer *hdr; 993 struct tls_mac_data aad; 994 char *buf, *iv; 995 size_t hdr_len, mac_len, record_len; 996 u_int digest_len, i; 997 998 ATF_REQUIRE_INTEQ(0, padding % 16); 999 1000 hdr = dst; 1001 buf = dst; 1002 1003 debug(tc, "encrypting MTE record seqno %ju:\n", (uintmax_t)seqno); 1004 hdr_len = tls_header_len(en); 1005 mac_len = tls_mac_len(en); 1006 padding += (AES_BLOCK_LEN - (len + mac_len) % AES_BLOCK_LEN); 1007 ATF_REQUIRE_MSG(padding > 0 && padding <= 255, "invalid padding (%zu)", 1008 padding); 1009 1010 record_len = hdr_len + len + mac_len + padding; 1011 ATF_REQUIRE_MSG(record_len <= avail, "record_len (%zu) > avail (%zu): " 1012 "hdr_len %zu, len %zu, mac_len %zu, padding %zu", record_len, 1013 avail, hdr_len, len, mac_len, padding); 1014 1015 hdr->tls_type = record_type; 1016 hdr->tls_vmajor = TLS_MAJOR_VER_ONE; 1017 hdr->tls_vminor = en->tls_vminor; 1018 hdr->tls_length = htons(record_len - sizeof(*hdr)); 1019 iv = (char *)(hdr + 1); 1020 for (i = 0; i < AES_BLOCK_LEN; i++) 1021 iv[i] = rdigit(); 1022 debug_hexdump(tc, iv, AES_BLOCK_LEN, "explicit IV"); 1023 1024 /* Copy plaintext to ciphertext region. */ 1025 memcpy(buf + hdr_len, src, len); 1026 1027 /* Compute HMAC. */ 1028 tls_mte_aad(en, len, hdr, seqno, &aad); 1029 debug_hexdump(tc, &aad, sizeof(aad), "aad"); 1030 debug_hexdump(tc, src, len, "plaintext"); 1031 ATF_REQUIRE(compute_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len, 1032 &aad, sizeof(aad), src, len, buf + hdr_len + len, &digest_len)); 1033 ATF_REQUIRE_INTEQ(mac_len, digest_len); 1034 1035 /* Store padding. */ 1036 for (i = 0; i < padding; i++) 1037 buf[hdr_len + len + mac_len + i] = padding - 1; 1038 debug_hexdump(tc, buf + hdr_len + len, mac_len + padding, 1039 "MAC and padding"); 1040 1041 /* Encrypt the record. */ 1042 ATF_REQUIRE(cbc_encrypt(tls_EVP_CIPHER(en), en->cipher_key, iv, 1043 buf + hdr_len, buf + hdr_len, len + mac_len + padding)); 1044 debug_hexdump(tc, dst, record_len, "encrypted record"); 1045 1046 return (record_len); 1047 } 1048 1049 static size_t 1050 encrypt_tls_12_aead(const atf_tc_t *tc, struct tls_enable *en, 1051 uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst) 1052 { 1053 struct tls_record_layer *hdr; 1054 struct tls_aead_data aad; 1055 char nonce[12]; 1056 size_t hdr_len, mac_len, record_len; 1057 1058 hdr = dst; 1059 1060 debug(tc, "encrypting TLS 1.2 record seqno %ju:\n", (uintmax_t)seqno); 1061 hdr_len = tls_header_len(en); 1062 mac_len = tls_mac_len(en); 1063 record_len = hdr_len + len + mac_len; 1064 1065 hdr->tls_type = record_type; 1066 hdr->tls_vmajor = TLS_MAJOR_VER_ONE; 1067 hdr->tls_vminor = TLS_MINOR_VER_TWO; 1068 hdr->tls_length = htons(record_len - sizeof(*hdr)); 1069 if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 1070 memcpy(hdr + 1, &seqno, sizeof(seqno)); 1071 1072 tls_12_aead_aad(en, len, hdr, seqno, &aad); 1073 debug_hexdump(tc, &aad, sizeof(aad), "aad"); 1074 if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 1075 tls_12_gcm_nonce(en, hdr, nonce); 1076 else 1077 tls_13_nonce(en, seqno, nonce); 1078 debug_hexdump(tc, nonce, sizeof(nonce), "nonce"); 1079 1080 debug_hexdump(tc, src, len, "plaintext"); 1081 ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce, 1082 &aad, sizeof(aad), src, (char *)dst + hdr_len, len, 1083 (char *)dst + hdr_len + len, mac_len)); 1084 debug_hexdump(tc, dst, record_len, "encrypted record"); 1085 1086 return (record_len); 1087 } 1088 1089 static size_t 1090 encrypt_tls_13_aead(const atf_tc_t *tc, struct tls_enable *en, 1091 uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst, 1092 size_t padding) 1093 { 1094 struct tls_record_layer *hdr; 1095 struct tls_aead_data_13 aad; 1096 char nonce[12]; 1097 char *buf; 1098 size_t hdr_len, mac_len, record_len; 1099 1100 hdr = dst; 1101 1102 debug(tc, "encrypting TLS 1.3 record seqno %ju:\n", (uintmax_t)seqno); 1103 hdr_len = tls_header_len(en); 1104 mac_len = tls_mac_len(en); 1105 record_len = hdr_len + len + 1 + padding + mac_len; 1106 1107 hdr->tls_type = TLS_RLTYPE_APP; 1108 hdr->tls_vmajor = TLS_MAJOR_VER_ONE; 1109 hdr->tls_vminor = TLS_MINOR_VER_TWO; 1110 hdr->tls_length = htons(record_len - sizeof(*hdr)); 1111 1112 tls_13_aad(en, hdr, seqno, &aad); 1113 debug_hexdump(tc, &aad, sizeof(aad), "aad"); 1114 tls_13_nonce(en, seqno, nonce); 1115 debug_hexdump(tc, nonce, sizeof(nonce), "nonce"); 1116 1117 /* 1118 * Have to use a temporary buffer for the input so that the record 1119 * type can be appended. 1120 */ 1121 buf = malloc(len + 1 + padding); 1122 memcpy(buf, src, len); 1123 buf[len] = record_type; 1124 memset(buf + len + 1, 0, padding); 1125 debug_hexdump(tc, buf, len + 1 + padding, "plaintext + type + padding"); 1126 1127 ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce, 1128 &aad, sizeof(aad), buf, (char *)dst + hdr_len, len + 1 + padding, 1129 (char *)dst + hdr_len + len + 1 + padding, mac_len)); 1130 debug_hexdump(tc, dst, record_len, "encrypted record"); 1131 1132 free(buf); 1133 1134 return (record_len); 1135 } 1136 1137 static size_t 1138 encrypt_tls_aead(const atf_tc_t *tc, struct tls_enable *en, 1139 uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst, 1140 size_t avail, size_t padding) 1141 { 1142 size_t record_len; 1143 1144 record_len = tls_header_len(en) + len + padding + tls_trailer_len(en); 1145 ATF_REQUIRE_MSG(record_len <= avail, "record_len (%zu) > avail (%zu): " 1146 "header %zu len %zu padding %zu trailer %zu", record_len, avail, 1147 tls_header_len(en), len, padding, tls_trailer_len(en)); 1148 1149 if (en->tls_vminor == TLS_MINOR_VER_TWO) { 1150 ATF_REQUIRE_INTEQ(0, padding); 1151 ATF_REQUIRE_INTEQ(record_len, encrypt_tls_12_aead(tc, en, 1152 record_type, seqno, src, len, dst)); 1153 } else 1154 ATF_REQUIRE_INTEQ(record_len, encrypt_tls_13_aead(tc, en, 1155 record_type, seqno, src, len, dst, padding)); 1156 1157 return (record_len); 1158 } 1159 1160 static size_t 1161 encrypt_tls_record(const atf_tc_t *tc, struct tls_enable *en, 1162 uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst, 1163 size_t avail, size_t padding) 1164 { 1165 if (en->cipher_algorithm == CRYPTO_AES_CBC) 1166 return (encrypt_tls_aes_cbc_mte(tc, en, record_type, seqno, src, 1167 len, dst, avail, padding)); 1168 else 1169 return (encrypt_tls_aead(tc, en, record_type, seqno, src, len, 1170 dst, avail, padding)); 1171 } 1172 1173 static void 1174 test_ktls_transmit_app_data(const atf_tc_t *tc, struct tls_enable *en, 1175 uint64_t seqno, size_t len) 1176 { 1177 struct kevent ev; 1178 struct tls_record_layer *hdr; 1179 char *plaintext, *decrypted, *outbuf; 1180 size_t decrypted_len, outbuf_len, outbuf_cap, record_len, written; 1181 ssize_t rv; 1182 int kq, sockets[2]; 1183 uint8_t record_type; 1184 1185 plaintext = alloc_buffer(len); 1186 debug_hexdump(tc, plaintext, len, "plaintext"); 1187 decrypted = malloc(len); 1188 outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 + 1189 tls_trailer_len(en); 1190 outbuf = malloc(outbuf_cap); 1191 hdr = (struct tls_record_layer *)outbuf; 1192 1193 ATF_REQUIRE((kq = kqueue()) != -1); 1194 1195 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 1196 1197 ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en, 1198 sizeof(*en)) == 0); 1199 check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE); 1200 1201 EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL); 1202 ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0); 1203 EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL); 1204 ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0); 1205 1206 decrypted_len = 0; 1207 outbuf_len = 0; 1208 written = 0; 1209 1210 while (decrypted_len != len) { 1211 ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1); 1212 1213 switch (ev.filter) { 1214 case EVFILT_WRITE: 1215 /* Try to write any remaining data. */ 1216 rv = write(ev.ident, plaintext + written, 1217 len - written); 1218 ATF_REQUIRE_MSG(rv > 0, 1219 "failed to write to socket"); 1220 written += rv; 1221 if (written == len) { 1222 ev.flags = EV_DISABLE; 1223 ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, 1224 NULL) == 0); 1225 } 1226 break; 1227 1228 case EVFILT_READ: 1229 ATF_REQUIRE((ev.flags & EV_EOF) == 0); 1230 1231 /* 1232 * Try to read data for the next TLS record 1233 * into outbuf. Start by reading the header 1234 * to determine how much additional data to 1235 * read. 1236 */ 1237 if (outbuf_len < sizeof(struct tls_record_layer)) { 1238 rv = read(ev.ident, outbuf + outbuf_len, 1239 sizeof(struct tls_record_layer) - 1240 outbuf_len); 1241 ATF_REQUIRE_MSG(rv > 0, 1242 "failed to read from socket"); 1243 outbuf_len += rv; 1244 1245 if (outbuf_len == 1246 sizeof(struct tls_record_layer)) { 1247 debug(tc, "TLS header for seqno %ju:\n", 1248 (uintmax_t)seqno); 1249 debug_hexdump(tc, outbuf, outbuf_len, 1250 NULL); 1251 } 1252 } 1253 1254 if (outbuf_len < sizeof(struct tls_record_layer)) 1255 break; 1256 1257 record_len = sizeof(struct tls_record_layer) + 1258 ntohs(hdr->tls_length); 1259 debug(tc, "record_len %zu outbuf_cap %zu\n", 1260 record_len, outbuf_cap); 1261 ATF_REQUIRE(record_len <= outbuf_cap); 1262 ATF_REQUIRE(record_len > outbuf_len); 1263 rv = read(ev.ident, outbuf + outbuf_len, 1264 record_len - outbuf_len); 1265 if (rv == -1 && errno == EAGAIN) 1266 break; 1267 ATF_REQUIRE_MSG(rv > 0, 1268 "failed to read from socket: %s", strerror(errno)); 1269 1270 outbuf_len += rv; 1271 if (outbuf_len == record_len) { 1272 decrypted_len += decrypt_tls_record(tc, en, 1273 seqno, outbuf, outbuf_len, 1274 decrypted + decrypted_len, 1275 len - decrypted_len, &record_type); 1276 ATF_REQUIRE_INTEQ(TLS_RLTYPE_APP, record_type); 1277 1278 seqno++; 1279 outbuf_len = 0; 1280 } 1281 break; 1282 } 1283 } 1284 1285 ATF_REQUIRE_MSG(written == decrypted_len, 1286 "read %zu decrypted bytes, but wrote %zu", decrypted_len, written); 1287 1288 ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0); 1289 1290 free(outbuf); 1291 free(decrypted); 1292 free(plaintext); 1293 1294 close_sockets(sockets); 1295 ATF_REQUIRE(close(kq) == 0); 1296 } 1297 1298 static void 1299 ktls_send_control_message(int fd, uint8_t type, void *data, size_t len) 1300 { 1301 struct msghdr msg; 1302 struct cmsghdr *cmsg; 1303 char cbuf[CMSG_SPACE(sizeof(type))]; 1304 struct iovec iov; 1305 1306 memset(&msg, 0, sizeof(msg)); 1307 1308 msg.msg_control = cbuf; 1309 msg.msg_controllen = sizeof(cbuf); 1310 cmsg = CMSG_FIRSTHDR(&msg); 1311 cmsg->cmsg_level = IPPROTO_TCP; 1312 cmsg->cmsg_type = TLS_SET_RECORD_TYPE; 1313 cmsg->cmsg_len = CMSG_LEN(sizeof(type)); 1314 *(uint8_t *)CMSG_DATA(cmsg) = type; 1315 1316 iov.iov_base = data; 1317 iov.iov_len = len; 1318 msg.msg_iov = &iov; 1319 msg.msg_iovlen = 1; 1320 1321 ATF_REQUIRE_INTEQ((ssize_t)len, sendmsg(fd, &msg, 0)); 1322 } 1323 1324 static void 1325 test_ktls_transmit_control(const atf_tc_t *tc, struct tls_enable *en, 1326 uint64_t seqno, uint8_t type, size_t len) 1327 { 1328 struct tls_record_layer *hdr; 1329 char *plaintext, *decrypted, *outbuf; 1330 size_t outbuf_cap, payload_len, record_len; 1331 ssize_t rv; 1332 int sockets[2]; 1333 uint8_t record_type; 1334 1335 ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2); 1336 1337 plaintext = alloc_buffer(len); 1338 decrypted = malloc(len); 1339 outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en); 1340 outbuf = malloc(outbuf_cap); 1341 hdr = (struct tls_record_layer *)outbuf; 1342 1343 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 1344 1345 ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en, 1346 sizeof(*en)) == 0); 1347 check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE); 1348 1349 fd_set_blocking(sockets[0]); 1350 fd_set_blocking(sockets[1]); 1351 1352 ktls_send_control_message(sockets[1], type, plaintext, len); 1353 1354 /* 1355 * First read the header to determine how much additional data 1356 * to read. 1357 */ 1358 rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer)); 1359 ATF_REQUIRE_INTEQ(sizeof(struct tls_record_layer), rv); 1360 payload_len = ntohs(hdr->tls_length); 1361 record_len = payload_len + sizeof(struct tls_record_layer); 1362 ATF_REQUIRE_MSG(record_len <= outbuf_cap, 1363 "record_len (%zu) > outbuf_cap (%zu)", record_len, outbuf_cap); 1364 rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer), 1365 payload_len); 1366 ATF_REQUIRE_INTEQ((ssize_t)payload_len, rv); 1367 1368 rv = decrypt_tls_record(tc, en, seqno, outbuf, record_len, decrypted, 1369 len, &record_type); 1370 1371 ATF_REQUIRE_MSG((ssize_t)len == rv, 1372 "read %zd decrypted bytes, but wrote %zu", rv, len); 1373 ATF_REQUIRE_INTEQ(type, record_type); 1374 1375 ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0); 1376 1377 free(outbuf); 1378 free(decrypted); 1379 free(plaintext); 1380 1381 close_sockets(sockets); 1382 } 1383 1384 static void 1385 test_ktls_transmit_empty_fragment(const atf_tc_t *tc, struct tls_enable *en, 1386 uint64_t seqno) 1387 { 1388 struct tls_record_layer *hdr; 1389 char *outbuf; 1390 size_t outbuf_cap, payload_len, record_len; 1391 ssize_t rv; 1392 int sockets[2]; 1393 uint8_t record_type; 1394 1395 outbuf_cap = tls_header_len(en) + tls_trailer_len(en); 1396 outbuf = malloc(outbuf_cap); 1397 hdr = (struct tls_record_layer *)outbuf; 1398 1399 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 1400 1401 ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en, 1402 sizeof(*en)) == 0); 1403 check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE); 1404 1405 fd_set_blocking(sockets[0]); 1406 fd_set_blocking(sockets[1]); 1407 1408 /* 1409 * A write of zero bytes should send an empty fragment only for 1410 * TLS 1.0, otherwise an error should be raised. 1411 */ 1412 rv = write(sockets[1], NULL, 0); 1413 if (rv == 0) { 1414 ATF_REQUIRE_INTEQ(CRYPTO_AES_CBC, en->cipher_algorithm); 1415 ATF_REQUIRE_INTEQ(TLS_MINOR_VER_ZERO, en->tls_vminor); 1416 } else { 1417 ATF_REQUIRE_INTEQ(-1, rv); 1418 ATF_REQUIRE_ERRNO(EINVAL, true); 1419 goto out; 1420 } 1421 1422 /* 1423 * First read the header to determine how much additional data 1424 * to read. 1425 */ 1426 rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer)); 1427 ATF_REQUIRE_INTEQ(sizeof(struct tls_record_layer), rv); 1428 payload_len = ntohs(hdr->tls_length); 1429 record_len = payload_len + sizeof(struct tls_record_layer); 1430 ATF_REQUIRE_MSG(record_len <= outbuf_cap, 1431 "record_len (%zu) > outbuf_cap (%zu)", record_len, outbuf_cap); 1432 rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer), 1433 payload_len); 1434 ATF_REQUIRE_INTEQ((ssize_t)payload_len, rv); 1435 1436 rv = decrypt_tls_record(tc, en, seqno, outbuf, record_len, NULL, 0, 1437 &record_type); 1438 1439 ATF_REQUIRE_MSG(rv == 0, 1440 "read %zd decrypted bytes for an empty fragment", rv); 1441 ATF_REQUIRE_INTEQ(TLS_RLTYPE_APP, record_type); 1442 1443 out: 1444 free(outbuf); 1445 1446 close_sockets(sockets); 1447 } 1448 1449 static size_t 1450 ktls_receive_tls_record(struct tls_enable *en, int fd, uint8_t record_type, 1451 void *data, size_t len) 1452 { 1453 struct msghdr msg; 1454 struct cmsghdr *cmsg; 1455 struct tls_get_record *tgr; 1456 char cbuf[CMSG_SPACE(sizeof(*tgr))]; 1457 struct iovec iov; 1458 ssize_t rv; 1459 1460 memset(&msg, 0, sizeof(msg)); 1461 1462 msg.msg_control = cbuf; 1463 msg.msg_controllen = sizeof(cbuf); 1464 1465 iov.iov_base = data; 1466 iov.iov_len = len; 1467 msg.msg_iov = &iov; 1468 msg.msg_iovlen = 1; 1469 1470 ATF_REQUIRE((rv = recvmsg(fd, &msg, 0)) > 0); 1471 1472 ATF_REQUIRE((msg.msg_flags & (MSG_EOR | MSG_CTRUNC)) == MSG_EOR); 1473 1474 cmsg = CMSG_FIRSTHDR(&msg); 1475 ATF_REQUIRE(cmsg != NULL); 1476 ATF_REQUIRE_INTEQ(IPPROTO_TCP, cmsg->cmsg_level); 1477 ATF_REQUIRE_INTEQ(TLS_GET_RECORD, cmsg->cmsg_type); 1478 ATF_REQUIRE_INTEQ(CMSG_LEN(sizeof(*tgr)), cmsg->cmsg_len); 1479 1480 tgr = (struct tls_get_record *)CMSG_DATA(cmsg); 1481 ATF_REQUIRE_INTEQ(record_type, tgr->tls_type); 1482 ATF_REQUIRE_INTEQ(en->tls_vmajor, tgr->tls_vmajor); 1483 /* XXX: Not sure if this is what OpenSSL expects? */ 1484 if (en->tls_vminor == TLS_MINOR_VER_THREE) 1485 ATF_REQUIRE_INTEQ(TLS_MINOR_VER_TWO, tgr->tls_vminor); 1486 else 1487 ATF_REQUIRE_INTEQ(en->tls_vminor, tgr->tls_vminor); 1488 ATF_REQUIRE_INTEQ(htons(rv), tgr->tls_length); 1489 1490 return (rv); 1491 } 1492 1493 static void 1494 test_ktls_receive_app_data(const atf_tc_t *tc, struct tls_enable *en, 1495 uint64_t seqno, size_t len, size_t padding) 1496 { 1497 struct kevent ev; 1498 char *plaintext, *received, *outbuf; 1499 size_t outbuf_cap, outbuf_len, outbuf_sent, received_len, todo, written; 1500 ssize_t rv; 1501 int kq, sockets[2]; 1502 1503 plaintext = alloc_buffer(len); 1504 received = malloc(len); 1505 outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 + 1506 tls_trailer_len(en); 1507 outbuf = malloc(outbuf_cap); 1508 1509 ATF_REQUIRE((kq = kqueue()) != -1); 1510 1511 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 1512 1513 ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en, 1514 sizeof(*en)) == 0); 1515 check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE); 1516 1517 EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL); 1518 ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0); 1519 EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL); 1520 ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0); 1521 1522 received_len = 0; 1523 outbuf_len = 0; 1524 written = 0; 1525 1526 while (received_len != len) { 1527 ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1); 1528 1529 switch (ev.filter) { 1530 case EVFILT_WRITE: 1531 /* 1532 * Compose the next TLS record to send. 1533 */ 1534 if (outbuf_len == 0) { 1535 ATF_REQUIRE(written < len); 1536 todo = len - written; 1537 if (todo > TLS_MAX_MSG_SIZE_V10_2 - padding) 1538 todo = TLS_MAX_MSG_SIZE_V10_2 - padding; 1539 outbuf_len = encrypt_tls_record(tc, en, 1540 TLS_RLTYPE_APP, seqno, plaintext + written, 1541 todo, outbuf, outbuf_cap, padding); 1542 outbuf_sent = 0; 1543 written += todo; 1544 seqno++; 1545 } 1546 1547 /* 1548 * Try to write the remainder of the current 1549 * TLS record. 1550 */ 1551 rv = write(ev.ident, outbuf + outbuf_sent, 1552 outbuf_len - outbuf_sent); 1553 ATF_REQUIRE_MSG(rv > 0, 1554 "failed to write to socket: %s", strerror(errno)); 1555 outbuf_sent += rv; 1556 if (outbuf_sent == outbuf_len) { 1557 outbuf_len = 0; 1558 if (written == len) { 1559 ev.flags = EV_DISABLE; 1560 ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, 1561 NULL) == 0); 1562 } 1563 } 1564 break; 1565 1566 case EVFILT_READ: 1567 ATF_REQUIRE((ev.flags & EV_EOF) == 0); 1568 1569 rv = ktls_receive_tls_record(en, ev.ident, 1570 TLS_RLTYPE_APP, received + received_len, 1571 len - received_len); 1572 received_len += rv; 1573 break; 1574 } 1575 } 1576 1577 ATF_REQUIRE_MSG(written == received_len, 1578 "read %zu decrypted bytes, but wrote %zu", received_len, written); 1579 1580 ATF_REQUIRE(memcmp(plaintext, received, len) == 0); 1581 1582 free(outbuf); 1583 free(received); 1584 free(plaintext); 1585 1586 close_sockets(sockets); 1587 ATF_REQUIRE(close(kq) == 0); 1588 } 1589 1590 static void 1591 ktls_receive_tls_error(int fd, int expected_error) 1592 { 1593 struct msghdr msg; 1594 struct tls_get_record *tgr; 1595 char cbuf[CMSG_SPACE(sizeof(*tgr))]; 1596 char buf[64]; 1597 struct iovec iov; 1598 1599 memset(&msg, 0, sizeof(msg)); 1600 1601 msg.msg_control = cbuf; 1602 msg.msg_controllen = sizeof(cbuf); 1603 1604 iov.iov_base = buf; 1605 iov.iov_len = sizeof(buf); 1606 msg.msg_iov = &iov; 1607 msg.msg_iovlen = 1; 1608 1609 ATF_REQUIRE(recvmsg(fd, &msg, 0) == -1); 1610 if (expected_error != 0) 1611 ATF_REQUIRE_ERRNO(expected_error, true); 1612 } 1613 1614 static void 1615 test_ktls_receive_corrupted_record(const atf_tc_t *tc, struct tls_enable *en, 1616 uint64_t seqno, size_t len, ssize_t offset) 1617 { 1618 char *plaintext, *outbuf; 1619 size_t outbuf_cap, outbuf_len; 1620 ssize_t rv; 1621 int sockets[2]; 1622 1623 ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2); 1624 1625 plaintext = alloc_buffer(len); 1626 outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en); 1627 outbuf = malloc(outbuf_cap); 1628 1629 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 1630 1631 ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en, 1632 sizeof(*en)) == 0); 1633 check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE); 1634 1635 fd_set_blocking(sockets[0]); 1636 fd_set_blocking(sockets[1]); 1637 1638 outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno, 1639 plaintext, len, outbuf, outbuf_cap, 0); 1640 1641 /* A negative offset is an offset from the end. */ 1642 if (offset < 0) 1643 offset += outbuf_len; 1644 outbuf[offset] ^= 0x01; 1645 1646 rv = write(sockets[1], outbuf, outbuf_len); 1647 ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv); 1648 1649 ktls_receive_tls_error(sockets[0], EBADMSG); 1650 1651 free(outbuf); 1652 free(plaintext); 1653 1654 close_sockets_ignore_errors(sockets); 1655 } 1656 1657 static void 1658 test_ktls_receive_corrupted_iv(const atf_tc_t *tc, struct tls_enable *en, 1659 uint64_t seqno, size_t len) 1660 { 1661 ATF_REQUIRE(tls_header_len(en) > sizeof(struct tls_record_layer)); 1662 1663 /* Corrupt the first byte of the explicit IV after the header. */ 1664 test_ktls_receive_corrupted_record(tc, en, seqno, len, 1665 sizeof(struct tls_record_layer)); 1666 } 1667 1668 static void 1669 test_ktls_receive_corrupted_data(const atf_tc_t *tc, struct tls_enable *en, 1670 uint64_t seqno, size_t len) 1671 { 1672 ATF_REQUIRE(len > 0); 1673 1674 /* Corrupt the first ciphertext byte after the header. */ 1675 test_ktls_receive_corrupted_record(tc, en, seqno, len, 1676 tls_header_len(en)); 1677 } 1678 1679 static void 1680 test_ktls_receive_corrupted_mac(const atf_tc_t *tc, struct tls_enable *en, 1681 uint64_t seqno, size_t len) 1682 { 1683 size_t offset; 1684 1685 /* Corrupt the first byte of the MAC. */ 1686 if (en->cipher_algorithm == CRYPTO_AES_CBC) 1687 offset = tls_header_len(en) + len; 1688 else 1689 offset = -tls_mac_len(en); 1690 test_ktls_receive_corrupted_record(tc, en, seqno, len, offset); 1691 } 1692 1693 static void 1694 test_ktls_receive_corrupted_padding(const atf_tc_t *tc, struct tls_enable *en, 1695 uint64_t seqno, size_t len) 1696 { 1697 ATF_REQUIRE_INTEQ(CRYPTO_AES_CBC, en->cipher_algorithm); 1698 1699 /* Corrupt the last byte of the padding. */ 1700 test_ktls_receive_corrupted_record(tc, en, seqno, len, -1); 1701 } 1702 1703 static void 1704 test_ktls_receive_truncated_record(const atf_tc_t *tc, struct tls_enable *en, 1705 uint64_t seqno, size_t len) 1706 { 1707 char *plaintext, *outbuf; 1708 size_t outbuf_cap, outbuf_len; 1709 ssize_t rv; 1710 int sockets[2]; 1711 1712 ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2); 1713 1714 plaintext = alloc_buffer(len); 1715 outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en); 1716 outbuf = malloc(outbuf_cap); 1717 1718 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 1719 1720 ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en, 1721 sizeof(*en)) == 0); 1722 check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE); 1723 1724 fd_set_blocking(sockets[0]); 1725 fd_set_blocking(sockets[1]); 1726 1727 outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno, 1728 plaintext, len, outbuf, outbuf_cap, 0); 1729 1730 rv = write(sockets[1], outbuf, outbuf_len / 2); 1731 ATF_REQUIRE_INTEQ((ssize_t)(outbuf_len / 2), rv); 1732 1733 ATF_REQUIRE(shutdown(sockets[1], SHUT_WR) == 0); 1734 1735 ktls_receive_tls_error(sockets[0], EMSGSIZE); 1736 1737 free(outbuf); 1738 free(plaintext); 1739 1740 close_sockets_ignore_errors(sockets); 1741 } 1742 1743 static void 1744 test_ktls_receive_bad_major(const atf_tc_t *tc, struct tls_enable *en, 1745 uint64_t seqno, size_t len) 1746 { 1747 struct tls_record_layer *hdr; 1748 char *plaintext, *outbuf; 1749 size_t outbuf_cap, outbuf_len; 1750 ssize_t rv; 1751 int sockets[2]; 1752 1753 ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2); 1754 1755 plaintext = alloc_buffer(len); 1756 outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en); 1757 outbuf = malloc(outbuf_cap); 1758 1759 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 1760 1761 ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en, 1762 sizeof(*en)) == 0); 1763 check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE); 1764 1765 fd_set_blocking(sockets[0]); 1766 fd_set_blocking(sockets[1]); 1767 1768 outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno, 1769 plaintext, len, outbuf, outbuf_cap, 0); 1770 1771 hdr = (void *)outbuf; 1772 hdr->tls_vmajor++; 1773 1774 rv = write(sockets[1], outbuf, outbuf_len); 1775 ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv); 1776 1777 ktls_receive_tls_error(sockets[0], EINVAL); 1778 1779 free(outbuf); 1780 free(plaintext); 1781 1782 close_sockets_ignore_errors(sockets); 1783 } 1784 1785 static void 1786 test_ktls_receive_bad_minor(const atf_tc_t *tc, struct tls_enable *en, 1787 uint64_t seqno, size_t len) 1788 { 1789 struct tls_record_layer *hdr; 1790 char *plaintext, *outbuf; 1791 size_t outbuf_cap, outbuf_len; 1792 ssize_t rv; 1793 int sockets[2]; 1794 1795 ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2); 1796 1797 plaintext = alloc_buffer(len); 1798 outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en); 1799 outbuf = malloc(outbuf_cap); 1800 1801 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 1802 1803 ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en, 1804 sizeof(*en)) == 0); 1805 check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE); 1806 1807 fd_set_blocking(sockets[0]); 1808 fd_set_blocking(sockets[1]); 1809 1810 outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno, 1811 plaintext, len, outbuf, outbuf_cap, 0); 1812 1813 hdr = (void *)outbuf; 1814 hdr->tls_vminor++; 1815 1816 rv = write(sockets[1], outbuf, outbuf_len); 1817 ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv); 1818 1819 ktls_receive_tls_error(sockets[0], EINVAL); 1820 1821 free(outbuf); 1822 free(plaintext); 1823 1824 close_sockets_ignore_errors(sockets); 1825 } 1826 1827 static void 1828 test_ktls_receive_bad_type(const atf_tc_t *tc, struct tls_enable *en, 1829 uint64_t seqno, size_t len) 1830 { 1831 struct tls_record_layer *hdr; 1832 char *plaintext, *outbuf; 1833 size_t outbuf_cap, outbuf_len; 1834 ssize_t rv; 1835 int sockets[2]; 1836 1837 ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2); 1838 ATF_REQUIRE_INTEQ(TLS_MINOR_VER_THREE, en->tls_vminor); 1839 1840 plaintext = alloc_buffer(len); 1841 outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en); 1842 outbuf = malloc(outbuf_cap); 1843 1844 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 1845 1846 ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en, 1847 sizeof(*en)) == 0); 1848 check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE); 1849 1850 fd_set_blocking(sockets[0]); 1851 fd_set_blocking(sockets[1]); 1852 1853 outbuf_len = encrypt_tls_record(tc, en, 0x21 /* Alert */, seqno, 1854 plaintext, len, outbuf, outbuf_cap, 0); 1855 1856 hdr = (void *)outbuf; 1857 hdr->tls_type = TLS_RLTYPE_APP + 1; 1858 1859 rv = write(sockets[1], outbuf, outbuf_len); 1860 ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv); 1861 1862 ktls_receive_tls_error(sockets[0], EINVAL); 1863 1864 free(outbuf); 1865 free(plaintext); 1866 1867 close_sockets_ignore_errors(sockets); 1868 } 1869 1870 static void 1871 test_ktls_receive_bad_size(const atf_tc_t *tc, struct tls_enable *en, 1872 uint64_t seqno, size_t len) 1873 { 1874 struct tls_record_layer *hdr; 1875 char *outbuf; 1876 size_t outbuf_len; 1877 ssize_t rv; 1878 int sockets[2]; 1879 1880 outbuf_len = sizeof(*hdr) + len; 1881 outbuf = calloc(1, outbuf_len); 1882 1883 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 1884 1885 ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en, 1886 sizeof(*en)) == 0); 1887 check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE); 1888 1889 fd_set_blocking(sockets[0]); 1890 fd_set_blocking(sockets[1]); 1891 1892 hdr = (void *)outbuf; 1893 hdr->tls_vmajor = en->tls_vmajor; 1894 if (en->tls_vminor == TLS_MINOR_VER_THREE) 1895 hdr->tls_vminor = TLS_MINOR_VER_TWO; 1896 else 1897 hdr->tls_vminor = en->tls_vminor; 1898 hdr->tls_type = TLS_RLTYPE_APP; 1899 hdr->tls_length = htons(len); 1900 1901 rv = write(sockets[1], outbuf, outbuf_len); 1902 ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv); 1903 1904 /* 1905 * The other end may notice the error and drop the connection 1906 * before this executes resulting in shutdown() failing with 1907 * either ENOTCONN or ECONNRESET. Ignore this error if it 1908 * occurs. 1909 */ 1910 if (shutdown(sockets[1], SHUT_WR) != 0) { 1911 ATF_REQUIRE_MSG(errno == ENOTCONN || errno == ECONNRESET, 1912 "shutdown() failed: %s", strerror(errno)); 1913 } 1914 1915 ktls_receive_tls_error(sockets[0], EMSGSIZE); 1916 1917 free(outbuf); 1918 1919 close_sockets_ignore_errors(sockets); 1920 } 1921 1922 #define TLS_10_TESTS(M) \ 1923 M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8, \ 1924 CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO) \ 1925 M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8, \ 1926 CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO) 1927 1928 #define TLS_13_TESTS(M) \ 1929 M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \ 1930 TLS_MINOR_VER_THREE) \ 1931 M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \ 1932 TLS_MINOR_VER_THREE) \ 1933 M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \ 1934 TLS_MINOR_VER_THREE) 1935 1936 #define AES_CBC_NONZERO_TESTS(M) \ 1937 M(aes128_cbc_1_1_sha1, CRYPTO_AES_CBC, 128 / 8, \ 1938 CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE) \ 1939 M(aes256_cbc_1_1_sha1, CRYPTO_AES_CBC, 256 / 8, \ 1940 CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE) \ 1941 M(aes128_cbc_1_2_sha1, CRYPTO_AES_CBC, 128 / 8, \ 1942 CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO) \ 1943 M(aes256_cbc_1_2_sha1, CRYPTO_AES_CBC, 256 / 8, \ 1944 CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO) \ 1945 M(aes128_cbc_1_2_sha256, CRYPTO_AES_CBC, 128 / 8, \ 1946 CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO) \ 1947 M(aes256_cbc_1_2_sha256, CRYPTO_AES_CBC, 256 / 8, \ 1948 CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO) \ 1949 M(aes128_cbc_1_2_sha384, CRYPTO_AES_CBC, 128 / 8, \ 1950 CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO) \ 1951 M(aes256_cbc_1_2_sha384, CRYPTO_AES_CBC, 256 / 8, \ 1952 CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO) \ 1953 1954 #define AES_CBC_TESTS(M) \ 1955 TLS_10_TESTS(M) \ 1956 AES_CBC_NONZERO_TESTS(M) 1957 1958 #define AES_GCM_12_TESTS(M) \ 1959 M(aes128_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \ 1960 TLS_MINOR_VER_TWO) \ 1961 M(aes256_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \ 1962 TLS_MINOR_VER_TWO) 1963 1964 #define AES_GCM_TESTS(M) \ 1965 AES_GCM_12_TESTS(M) \ 1966 M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \ 1967 TLS_MINOR_VER_THREE) \ 1968 M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \ 1969 TLS_MINOR_VER_THREE) 1970 1971 #define CHACHA20_TESTS(M) \ 1972 M(chacha20_poly1305_1_2, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \ 1973 TLS_MINOR_VER_TWO) \ 1974 M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \ 1975 TLS_MINOR_VER_THREE) 1976 1977 #define GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 1978 auth_alg, minor, name, len) \ 1979 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name); \ 1980 ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc) \ 1981 { \ 1982 struct tls_enable en; \ 1983 uint64_t seqno; \ 1984 \ 1985 ATF_REQUIRE_KTLS(); \ 1986 seqno = random(); \ 1987 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 1988 seqno, &en); \ 1989 test_ktls_transmit_app_data(tc, &en, seqno, len); \ 1990 free_tls_enable(&en); \ 1991 } 1992 1993 #define ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 1994 auth_alg, minor, name) \ 1995 ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name); 1996 1997 #define GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 1998 auth_alg, minor, name, type, len) \ 1999 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name); \ 2000 ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc) \ 2001 { \ 2002 struct tls_enable en; \ 2003 uint64_t seqno; \ 2004 \ 2005 ATF_REQUIRE_KTLS(); \ 2006 seqno = random(); \ 2007 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2008 seqno, &en); \ 2009 test_ktls_transmit_control(tc, &en, seqno, type, len); \ 2010 free_tls_enable(&en); \ 2011 } 2012 2013 #define ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2014 auth_alg, minor, name) \ 2015 ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name); 2016 2017 #define GEN_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg, \ 2018 key_size, auth_alg, minor) \ 2019 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_empty_fragment); \ 2020 ATF_TC_BODY(ktls_transmit_##cipher_name##_empty_fragment, tc) \ 2021 { \ 2022 struct tls_enable en; \ 2023 uint64_t seqno; \ 2024 \ 2025 ATF_REQUIRE_KTLS(); \ 2026 seqno = random(); \ 2027 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2028 seqno, &en); \ 2029 test_ktls_transmit_empty_fragment(tc, &en, seqno); \ 2030 free_tls_enable(&en); \ 2031 } 2032 2033 #define ADD_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg, \ 2034 key_size, auth_alg, minor) \ 2035 ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_empty_fragment); 2036 2037 #define GEN_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \ 2038 minor) \ 2039 GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2040 auth_alg, minor, short, 64) \ 2041 GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2042 auth_alg, minor, long, 64 * 1024) \ 2043 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2044 auth_alg, minor, control, 0x21 /* Alert */, 32) 2045 2046 #define ADD_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \ 2047 minor) \ 2048 ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2049 auth_alg, minor, short) \ 2050 ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2051 auth_alg, minor, long) \ 2052 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2053 auth_alg, minor, control) 2054 2055 /* 2056 * For each supported cipher suite, run three transmit tests: 2057 * 2058 * - a short test which sends 64 bytes of application data (likely as 2059 * a single TLS record) 2060 * 2061 * - a long test which sends 64KB of application data (split across 2062 * multiple TLS records) 2063 * 2064 * - a control test which sends a single record with a specific 2065 * content type via sendmsg() 2066 */ 2067 AES_CBC_TESTS(GEN_TRANSMIT_TESTS); 2068 AES_GCM_TESTS(GEN_TRANSMIT_TESTS); 2069 CHACHA20_TESTS(GEN_TRANSMIT_TESTS); 2070 2071 #define GEN_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size, \ 2072 auth_alg, minor) \ 2073 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2074 auth_alg, minor, padding_1, 0x21 /* Alert */, 1) \ 2075 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2076 auth_alg, minor, padding_2, 0x21 /* Alert */, 2) \ 2077 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2078 auth_alg, minor, padding_3, 0x21 /* Alert */, 3) \ 2079 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2080 auth_alg, minor, padding_4, 0x21 /* Alert */, 4) \ 2081 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2082 auth_alg, minor, padding_5, 0x21 /* Alert */, 5) \ 2083 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2084 auth_alg, minor, padding_6, 0x21 /* Alert */, 6) \ 2085 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2086 auth_alg, minor, padding_7, 0x21 /* Alert */, 7) \ 2087 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2088 auth_alg, minor, padding_8, 0x21 /* Alert */, 8) \ 2089 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2090 auth_alg, minor, padding_9, 0x21 /* Alert */, 9) \ 2091 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2092 auth_alg, minor, padding_10, 0x21 /* Alert */, 10) \ 2093 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2094 auth_alg, minor, padding_11, 0x21 /* Alert */, 11) \ 2095 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2096 auth_alg, minor, padding_12, 0x21 /* Alert */, 12) \ 2097 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2098 auth_alg, minor, padding_13, 0x21 /* Alert */, 13) \ 2099 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2100 auth_alg, minor, padding_14, 0x21 /* Alert */, 14) \ 2101 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2102 auth_alg, minor, padding_15, 0x21 /* Alert */, 15) \ 2103 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2104 auth_alg, minor, padding_16, 0x21 /* Alert */, 16) 2105 2106 #define ADD_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size, \ 2107 auth_alg, minor) \ 2108 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2109 auth_alg, minor, padding_1) \ 2110 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2111 auth_alg, minor, padding_2) \ 2112 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2113 auth_alg, minor, padding_3) \ 2114 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2115 auth_alg, minor, padding_4) \ 2116 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2117 auth_alg, minor, padding_5) \ 2118 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2119 auth_alg, minor, padding_6) \ 2120 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2121 auth_alg, minor, padding_7) \ 2122 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2123 auth_alg, minor, padding_8) \ 2124 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2125 auth_alg, minor, padding_9) \ 2126 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2127 auth_alg, minor, padding_10) \ 2128 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2129 auth_alg, minor, padding_11) \ 2130 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2131 auth_alg, minor, padding_12) \ 2132 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2133 auth_alg, minor, padding_13) \ 2134 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2135 auth_alg, minor, padding_14) \ 2136 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2137 auth_alg, minor, padding_15) \ 2138 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2139 auth_alg, minor, padding_16) 2140 2141 /* 2142 * For AES-CBC MTE cipher suites using padding, add tests of messages 2143 * with each possible padding size. Note that the padding_<N> tests 2144 * do not necessarily test <N> bytes of padding as the padding is a 2145 * function of the cipher suite's MAC length. However, cycling 2146 * through all of the payload sizes from 1 to 16 should exercise all 2147 * of the possible padding lengths for each suite. 2148 */ 2149 AES_CBC_TESTS(GEN_TRANSMIT_PADDING_TESTS); 2150 2151 /* 2152 * Test "empty fragments" which are TLS records with no payload that 2153 * OpenSSL can send for TLS 1.0 connections. 2154 */ 2155 AES_CBC_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST); 2156 AES_GCM_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST); 2157 CHACHA20_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST); 2158 2159 static void 2160 test_ktls_invalid_transmit_cipher_suite(const atf_tc_t *tc, 2161 struct tls_enable *en) 2162 { 2163 int sockets[2]; 2164 2165 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 2166 2167 ATF_REQUIRE_ERRNO(EINVAL, setsockopt(sockets[1], IPPROTO_TCP, 2168 TCP_TXTLS_ENABLE, en, sizeof(*en)) == -1); 2169 2170 close_sockets(sockets); 2171 } 2172 2173 #define GEN_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \ 2174 minor) \ 2175 ATF_TC_WITHOUT_HEAD(ktls_transmit_invalid_##name); \ 2176 ATF_TC_BODY(ktls_transmit_invalid_##name, tc) \ 2177 { \ 2178 struct tls_enable en; \ 2179 uint64_t seqno; \ 2180 \ 2181 ATF_REQUIRE_KTLS(); \ 2182 seqno = random(); \ 2183 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2184 seqno, &en); \ 2185 test_ktls_invalid_transmit_cipher_suite(tc, &en); \ 2186 free_tls_enable(&en); \ 2187 } 2188 2189 #define ADD_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \ 2190 minor) \ 2191 ATF_TP_ADD_TC(tp, ktls_transmit_invalid_##name); 2192 2193 #define INVALID_CIPHER_SUITES(M) \ 2194 M(aes128_cbc_1_0_sha256, CRYPTO_AES_CBC, 128 / 8, \ 2195 CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ZERO) \ 2196 M(aes128_cbc_1_0_sha384, CRYPTO_AES_CBC, 128 / 8, \ 2197 CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ZERO) \ 2198 M(aes128_gcm_1_0, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \ 2199 TLS_MINOR_VER_ZERO) \ 2200 M(chacha20_poly1305_1_0, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \ 2201 TLS_MINOR_VER_ZERO) \ 2202 M(aes128_cbc_1_1_sha256, CRYPTO_AES_CBC, 128 / 8, \ 2203 CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ONE) \ 2204 M(aes128_cbc_1_1_sha384, CRYPTO_AES_CBC, 128 / 8, \ 2205 CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ONE) \ 2206 M(aes128_gcm_1_1, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \ 2207 TLS_MINOR_VER_ONE) \ 2208 M(chacha20_poly1305_1_1, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \ 2209 TLS_MINOR_VER_ONE) \ 2210 M(aes128_cbc_1_3_sha1, CRYPTO_AES_CBC, 128 / 8, \ 2211 CRYPTO_SHA1_HMAC, TLS_MINOR_VER_THREE) \ 2212 M(aes128_cbc_1_3_sha256, CRYPTO_AES_CBC, 128 / 8, \ 2213 CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_THREE) \ 2214 M(aes128_cbc_1_3_sha384, CRYPTO_AES_CBC, 128 / 8, \ 2215 CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_THREE) 2216 2217 /* 2218 * Ensure that invalid cipher suites are rejected for transmit. 2219 */ 2220 INVALID_CIPHER_SUITES(GEN_INVALID_TRANSMIT_TEST); 2221 2222 #define GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2223 auth_alg, minor, name, len, padding) \ 2224 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name); \ 2225 ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc) \ 2226 { \ 2227 struct tls_enable en; \ 2228 uint64_t seqno; \ 2229 \ 2230 ATF_REQUIRE_KTLS(); \ 2231 seqno = random(); \ 2232 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2233 seqno, &en); \ 2234 test_ktls_receive_app_data(tc, &en, seqno, len, padding); \ 2235 free_tls_enable(&en); \ 2236 } 2237 2238 #define ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2239 auth_alg, minor, name) \ 2240 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name); 2241 2242 #define GEN_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2243 auth_alg, minor, len) \ 2244 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_data); \ 2245 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_data, tc) \ 2246 { \ 2247 struct tls_enable en; \ 2248 uint64_t seqno; \ 2249 \ 2250 ATF_REQUIRE_KTLS(); \ 2251 seqno = random(); \ 2252 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2253 seqno, &en); \ 2254 test_ktls_receive_corrupted_data(tc, &en, seqno, len); \ 2255 free_tls_enable(&en); \ 2256 } 2257 2258 #define ADD_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2259 auth_alg, minor) \ 2260 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_data); 2261 2262 #define GEN_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size, \ 2263 auth_alg, minor, len) \ 2264 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_mac); \ 2265 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_mac, tc) \ 2266 { \ 2267 struct tls_enable en; \ 2268 uint64_t seqno; \ 2269 \ 2270 ATF_REQUIRE_KTLS(); \ 2271 seqno = random(); \ 2272 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2273 seqno, &en); \ 2274 test_ktls_receive_corrupted_mac(tc, &en, seqno, len); \ 2275 free_tls_enable(&en); \ 2276 } 2277 2278 #define ADD_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size, \ 2279 auth_alg, minor) \ 2280 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_mac); 2281 2282 #define GEN_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size, \ 2283 auth_alg, minor, len) \ 2284 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_truncated_record); \ 2285 ATF_TC_BODY(ktls_receive_##cipher_name##_truncated_record, tc) \ 2286 { \ 2287 struct tls_enable en; \ 2288 uint64_t seqno; \ 2289 \ 2290 ATF_REQUIRE_KTLS(); \ 2291 seqno = random(); \ 2292 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2293 seqno, &en); \ 2294 test_ktls_receive_truncated_record(tc, &en, seqno, len); \ 2295 free_tls_enable(&en); \ 2296 } 2297 2298 #define ADD_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size, \ 2299 auth_alg, minor) \ 2300 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_truncated_record); 2301 2302 #define GEN_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size, \ 2303 auth_alg, minor, len) \ 2304 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_major); \ 2305 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_major, tc) \ 2306 { \ 2307 struct tls_enable en; \ 2308 uint64_t seqno; \ 2309 \ 2310 ATF_REQUIRE_KTLS(); \ 2311 seqno = random(); \ 2312 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2313 seqno, &en); \ 2314 test_ktls_receive_bad_major(tc, &en, seqno, len); \ 2315 free_tls_enable(&en); \ 2316 } 2317 2318 #define ADD_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size, \ 2319 auth_alg, minor) \ 2320 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_major); 2321 2322 #define GEN_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size, \ 2323 auth_alg, minor, len) \ 2324 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_minor); \ 2325 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_minor, tc) \ 2326 { \ 2327 struct tls_enable en; \ 2328 uint64_t seqno; \ 2329 \ 2330 ATF_REQUIRE_KTLS(); \ 2331 seqno = random(); \ 2332 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2333 seqno, &en); \ 2334 test_ktls_receive_bad_minor(tc, &en, seqno, len); \ 2335 free_tls_enable(&en); \ 2336 } 2337 2338 #define ADD_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size, \ 2339 auth_alg, minor) \ 2340 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_minor); 2341 2342 #define GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2343 auth_alg, minor, name, len) \ 2344 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name); \ 2345 ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc) \ 2346 { \ 2347 struct tls_enable en; \ 2348 uint64_t seqno; \ 2349 \ 2350 ATF_REQUIRE_KTLS(); \ 2351 seqno = random(); \ 2352 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2353 seqno, &en); \ 2354 test_ktls_receive_bad_size(tc, &en, seqno, (len)); \ 2355 free_tls_enable(&en); \ 2356 } 2357 2358 #define ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2359 auth_alg, minor, name) \ 2360 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name); 2361 2362 #define GEN_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \ 2363 minor) \ 2364 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2365 auth_alg, minor, short, 64, 0) \ 2366 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2367 auth_alg, minor, long, 64 * 1024, 0) \ 2368 GEN_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2369 auth_alg, minor, 64) \ 2370 GEN_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size, \ 2371 auth_alg, minor, 64) \ 2372 GEN_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size, \ 2373 auth_alg, minor, 64) \ 2374 GEN_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size, \ 2375 auth_alg, minor, 64) \ 2376 GEN_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size, \ 2377 auth_alg, minor, 64) \ 2378 GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2379 auth_alg, minor, small_record, \ 2380 tls_minimum_record_payload(&en) - 1) \ 2381 GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2382 auth_alg, minor, oversized_record, \ 2383 TLS_MAX_MSG_SIZE_V10_2 * 2) 2384 2385 #define ADD_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \ 2386 minor) \ 2387 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2388 auth_alg, minor, short) \ 2389 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2390 auth_alg, minor, long) \ 2391 ADD_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2392 auth_alg, minor) \ 2393 ADD_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size, \ 2394 auth_alg, minor) \ 2395 ADD_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size, \ 2396 auth_alg, minor) \ 2397 ADD_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size, \ 2398 auth_alg, minor) \ 2399 ADD_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size, \ 2400 auth_alg, minor) \ 2401 ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2402 auth_alg, minor, small_record) \ 2403 ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2404 auth_alg, minor, oversized_record) 2405 2406 /* 2407 * For each supported cipher suite, run several receive tests: 2408 * 2409 * - a short test which sends 64 bytes of application data (likely as 2410 * a single TLS record) 2411 * 2412 * - a long test which sends 64KB of application data (split across 2413 * multiple TLS records) 2414 * 2415 * - a test with corrupted payload data in a single TLS record 2416 * 2417 * - a test with a corrupted MAC in a single TLS record 2418 * 2419 * - a test with a truncated TLS record 2420 * 2421 * - tests with invalid TLS major and minor versions 2422 * 2423 * - a tests with a record whose is one less than the smallest valid 2424 * size 2425 * 2426 * - a test with an oversized TLS record 2427 */ 2428 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_TESTS); 2429 AES_GCM_TESTS(GEN_RECEIVE_TESTS); 2430 CHACHA20_TESTS(GEN_RECEIVE_TESTS); 2431 2432 #define GEN_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg, \ 2433 key_size, auth_alg, minor) \ 2434 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2435 auth_alg, minor, padding_1, 1, 0) \ 2436 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2437 auth_alg, minor, padding_2, 2, 0) \ 2438 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2439 auth_alg, minor, padding_3, 3, 0) \ 2440 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2441 auth_alg, minor, padding_4, 4, 0) \ 2442 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2443 auth_alg, minor, padding_5, 5, 0) \ 2444 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2445 auth_alg, minor, padding_6, 6, 0) \ 2446 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2447 auth_alg, minor, padding_7, 7, 0) \ 2448 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2449 auth_alg, minor, padding_8, 8, 0) \ 2450 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2451 auth_alg, minor, padding_9, 9, 0) \ 2452 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2453 auth_alg, minor, padding_10, 10, 0) \ 2454 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2455 auth_alg, minor, padding_11, 11, 0) \ 2456 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2457 auth_alg, minor, padding_12, 12, 0) \ 2458 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2459 auth_alg, minor, padding_13, 13, 0) \ 2460 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2461 auth_alg, minor, padding_14, 14, 0) \ 2462 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2463 auth_alg, minor, padding_15, 15, 0) \ 2464 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2465 auth_alg, minor, padding_16, 16, 0) \ 2466 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2467 auth_alg, minor, padding_16_extra, 16, 16) \ 2468 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2469 auth_alg, minor, padding_32_extra, 16, 32) 2470 2471 #define ADD_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg, \ 2472 key_size, auth_alg, minor) \ 2473 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2474 auth_alg, minor, padding_1) \ 2475 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2476 auth_alg, minor, padding_2) \ 2477 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2478 auth_alg, minor, padding_3) \ 2479 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2480 auth_alg, minor, padding_4) \ 2481 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2482 auth_alg, minor, padding_5) \ 2483 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2484 auth_alg, minor, padding_6) \ 2485 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2486 auth_alg, minor, padding_7) \ 2487 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2488 auth_alg, minor, padding_8) \ 2489 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2490 auth_alg, minor, padding_9) \ 2491 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2492 auth_alg, minor, padding_10) \ 2493 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2494 auth_alg, minor, padding_11) \ 2495 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2496 auth_alg, minor, padding_12) \ 2497 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2498 auth_alg, minor, padding_13) \ 2499 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2500 auth_alg, minor, padding_14) \ 2501 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2502 auth_alg, minor, padding_15) \ 2503 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2504 auth_alg, minor, padding_16) \ 2505 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2506 auth_alg, minor, padding_16_extra) \ 2507 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2508 auth_alg, minor, padding_32_extra) 2509 2510 #define GEN_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size, \ 2511 auth_alg, minor, len) \ 2512 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_padding); \ 2513 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_padding, tc) \ 2514 { \ 2515 struct tls_enable en; \ 2516 uint64_t seqno; \ 2517 \ 2518 ATF_REQUIRE_KTLS(); \ 2519 seqno = random(); \ 2520 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2521 seqno, &en); \ 2522 test_ktls_receive_corrupted_padding(tc, &en, seqno, len); \ 2523 free_tls_enable(&en); \ 2524 } 2525 2526 #define ADD_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size, \ 2527 auth_alg, minor) \ 2528 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_padding); 2529 2530 #define GEN_RECEIVE_MTE_TESTS(cipher_name, cipher_alg, key_size, \ 2531 auth_alg, minor) \ 2532 GEN_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg, \ 2533 key_size, auth_alg, minor) \ 2534 GEN_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size, \ 2535 auth_alg, minor, 64) \ 2536 GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2537 auth_alg, minor, non_block_size, \ 2538 tls_minimum_record_payload(&en) + 1) 2539 2540 #define ADD_RECEIVE_MTE_TESTS(cipher_name, cipher_alg, key_size, \ 2541 auth_alg, minor) \ 2542 ADD_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg, \ 2543 key_size, auth_alg, minor) \ 2544 ADD_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size, \ 2545 auth_alg, minor) \ 2546 ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2547 auth_alg, minor, non_block_size) 2548 2549 /* 2550 * For AES-CBC MTE cipher suites using padding, add tests of messages 2551 * with each possible padding size. Note that the padding_<N> tests 2552 * do not necessarily test <N> bytes of padding as the padding is a 2553 * function of the cipher suite's MAC length. However, cycling 2554 * through all of the payload sizes from 1 to 16 should exercise all 2555 * of the possible padding lengths for each suite. 2556 * 2557 * Two additional tests check for additional padding with an extra 2558 * 16 or 32 bytes beyond the normal padding. 2559 * 2560 * Another test checks for corrupted padding. 2561 * 2562 * Another test checks for a record whose payload is not a multiple of 2563 * the AES block size. 2564 */ 2565 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_MTE_TESTS); 2566 2567 #define GEN_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size, \ 2568 auth_alg, minor) \ 2569 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_iv); \ 2570 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_iv, tc) \ 2571 { \ 2572 struct tls_enable en; \ 2573 uint64_t seqno; \ 2574 \ 2575 ATF_REQUIRE_KTLS(); \ 2576 seqno = random(); \ 2577 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2578 seqno, &en); \ 2579 test_ktls_receive_corrupted_iv(tc, &en, seqno, 64); \ 2580 free_tls_enable(&en); \ 2581 } 2582 2583 #define ADD_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size, \ 2584 auth_alg, minor) \ 2585 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_iv); 2586 2587 #define GEN_RECEIVE_EXPLICIT_IV_TESTS(cipher_name, cipher_alg, \ 2588 key_size, auth_alg, minor) \ 2589 GEN_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size, \ 2590 auth_alg, minor) \ 2591 GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2592 auth_alg, minor, short_header, \ 2593 sizeof(struct tls_record_layer) + 1) 2594 2595 #define ADD_RECEIVE_EXPLICIT_IV_TESTS(cipher_name, cipher_alg, \ 2596 key_size, auth_alg, minor) \ 2597 ADD_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size, \ 2598 auth_alg, minor) \ 2599 ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2600 auth_alg, minor, short_header) 2601 2602 /* 2603 * For cipher suites with an explicit IV, run a receive test where the 2604 * explicit IV has been corrupted. Also run a receive test that sends 2605 * a short record without a complete IV. 2606 */ 2607 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_EXPLICIT_IV_TESTS); 2608 AES_GCM_12_TESTS(GEN_RECEIVE_EXPLICIT_IV_TESTS); 2609 2610 #define GEN_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size, \ 2611 auth_alg, minor, len) \ 2612 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_type); \ 2613 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_type, tc) \ 2614 { \ 2615 struct tls_enable en; \ 2616 uint64_t seqno; \ 2617 \ 2618 ATF_REQUIRE_KTLS(); \ 2619 seqno = random(); \ 2620 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2621 seqno, &en); \ 2622 test_ktls_receive_bad_type(tc, &en, seqno, len); \ 2623 free_tls_enable(&en); \ 2624 } 2625 2626 #define ADD_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size, \ 2627 auth_alg, minor) \ 2628 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_type); 2629 2630 #define GEN_RECEIVE_TLS13_TESTS(cipher_name, cipher_alg, key_size, \ 2631 auth_alg, minor) \ 2632 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2633 auth_alg, minor, short_padded, 64, 16) \ 2634 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2635 auth_alg, minor, long_padded, 64 * 1024, 15) \ 2636 GEN_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size, \ 2637 auth_alg, minor, 64) 2638 2639 #define ADD_RECEIVE_TLS13_TESTS(cipher_name, cipher_alg, key_size, \ 2640 auth_alg, minor) \ 2641 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2642 auth_alg, minor, short_padded) \ 2643 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2644 auth_alg, minor, long_padded) \ 2645 ADD_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size, \ 2646 auth_alg, minor) 2647 2648 /* 2649 * For TLS 1.3 cipher suites, run two additional receive tests which 2650 * use add padding to each record. Also run a test that uses an 2651 * invalid "outer" record type. 2652 */ 2653 TLS_13_TESTS(GEN_RECEIVE_TLS13_TESTS); 2654 2655 static void 2656 test_ktls_invalid_receive_cipher_suite(const atf_tc_t *tc, 2657 struct tls_enable *en) 2658 { 2659 int sockets[2]; 2660 2661 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 2662 2663 ATF_REQUIRE_ERRNO(EINVAL, setsockopt(sockets[1], IPPROTO_TCP, 2664 TCP_RXTLS_ENABLE, en, sizeof(*en)) == -1); 2665 2666 close_sockets(sockets); 2667 } 2668 2669 #define GEN_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg, \ 2670 minor) \ 2671 ATF_TC_WITHOUT_HEAD(ktls_receive_invalid_##name); \ 2672 ATF_TC_BODY(ktls_receive_invalid_##name, tc) \ 2673 { \ 2674 struct tls_enable en; \ 2675 uint64_t seqno; \ 2676 \ 2677 ATF_REQUIRE_KTLS(); \ 2678 seqno = random(); \ 2679 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2680 seqno, &en); \ 2681 test_ktls_invalid_receive_cipher_suite(tc, &en); \ 2682 free_tls_enable(&en); \ 2683 } 2684 2685 #define ADD_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg, \ 2686 minor) \ 2687 ATF_TP_ADD_TC(tp, ktls_receive_invalid_##name); 2688 2689 /* 2690 * Ensure that invalid cipher suites are rejected for receive. 2691 */ 2692 INVALID_CIPHER_SUITES(GEN_INVALID_RECEIVE_TEST); 2693 2694 static void 2695 test_ktls_unsupported_receive_cipher_suite(const atf_tc_t *tc, 2696 struct tls_enable *en) 2697 { 2698 int sockets[2]; 2699 2700 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 2701 2702 ATF_REQUIRE_ERRNO(EPROTONOSUPPORT, setsockopt(sockets[1], IPPROTO_TCP, 2703 TCP_RXTLS_ENABLE, en, sizeof(*en)) == -1); 2704 2705 close_sockets(sockets); 2706 } 2707 2708 #define GEN_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size, \ 2709 auth_alg, minor) \ 2710 ATF_TC_WITHOUT_HEAD(ktls_receive_unsupported_##name); \ 2711 ATF_TC_BODY(ktls_receive_unsupported_##name, tc) \ 2712 { \ 2713 struct tls_enable en; \ 2714 uint64_t seqno; \ 2715 \ 2716 ATF_REQUIRE_KTLS(); \ 2717 seqno = random(); \ 2718 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2719 seqno, &en); \ 2720 test_ktls_unsupported_receive_cipher_suite(tc, &en); \ 2721 free_tls_enable(&en); \ 2722 } 2723 2724 #define ADD_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size, \ 2725 auth_alg, minor) \ 2726 ATF_TP_ADD_TC(tp, ktls_receive_unsupported_##name); 2727 2728 /* 2729 * Ensure that valid cipher suites not supported for receive are 2730 * rejected. 2731 */ 2732 TLS_10_TESTS(GEN_UNSUPPORTED_RECEIVE_TEST); 2733 2734 /* 2735 * Try to perform an invalid sendto(2) on a TXTLS-enabled socket, to exercise 2736 * KTLS error handling in the socket layer. 2737 */ 2738 ATF_TC_WITHOUT_HEAD(ktls_sendto_baddst); 2739 ATF_TC_BODY(ktls_sendto_baddst, tc) 2740 { 2741 char buf[32]; 2742 struct sockaddr_in dst; 2743 struct tls_enable en; 2744 ssize_t n; 2745 int s; 2746 2747 ATF_REQUIRE_KTLS(); 2748 2749 s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); 2750 ATF_REQUIRE(s >= 0); 2751 2752 build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, 2753 TLS_MINOR_VER_THREE, (uint64_t)random(), &en); 2754 2755 ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en, 2756 sizeof(en)) == 0); 2757 2758 memset(&dst, 0, sizeof(dst)); 2759 dst.sin_family = AF_INET; 2760 dst.sin_len = sizeof(dst); 2761 dst.sin_addr.s_addr = htonl(INADDR_BROADCAST); 2762 dst.sin_port = htons(12345); 2763 2764 memset(buf, 0, sizeof(buf)); 2765 n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&dst, 2766 sizeof(dst)); 2767 2768 /* Can't transmit to the broadcast address over TCP. */ 2769 ATF_REQUIRE_ERRNO(EACCES, n == -1); 2770 ATF_REQUIRE(close(s) == 0); 2771 } 2772 2773 /* 2774 * Make sure that listen(2) returns an error for KTLS-enabled sockets, and 2775 * verify that an attempt to enable KTLS on a listening socket fails. 2776 */ 2777 ATF_TC_WITHOUT_HEAD(ktls_listening_socket); 2778 ATF_TC_BODY(ktls_listening_socket, tc) 2779 { 2780 struct tls_enable en; 2781 struct sockaddr_in sin; 2782 int s; 2783 2784 ATF_REQUIRE_KTLS(); 2785 2786 s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); 2787 ATF_REQUIRE(s >= 0); 2788 build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, 2789 TLS_MINOR_VER_THREE, (uint64_t)random(), &en); 2790 ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en, 2791 sizeof(en)) == 0); 2792 ATF_REQUIRE_ERRNO(EINVAL, listen(s, 1) == -1); 2793 ATF_REQUIRE(close(s) == 0); 2794 2795 s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); 2796 ATF_REQUIRE(s >= 0); 2797 build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, 2798 TLS_MINOR_VER_THREE, (uint64_t)random(), &en); 2799 ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_RXTLS_ENABLE, &en, 2800 sizeof(en)) == 0); 2801 ATF_REQUIRE_ERRNO(EINVAL, listen(s, 1) == -1); 2802 ATF_REQUIRE(close(s) == 0); 2803 2804 s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); 2805 ATF_REQUIRE(s >= 0); 2806 memset(&sin, 0, sizeof(sin)); 2807 sin.sin_family = AF_INET; 2808 sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); 2809 ATF_REQUIRE(bind(s, (struct sockaddr *)&sin, sizeof(sin)) == 0); 2810 ATF_REQUIRE(listen(s, 1) == 0); 2811 build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, 2812 TLS_MINOR_VER_THREE, (uint64_t)random(), &en); 2813 ATF_REQUIRE_ERRNO(ENOTCONN, 2814 setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en, sizeof(en)) != 0); 2815 ATF_REQUIRE_ERRNO(EINVAL, 2816 setsockopt(s, IPPROTO_TCP, TCP_RXTLS_ENABLE, &en, sizeof(en)) != 0); 2817 ATF_REQUIRE(close(s) == 0); 2818 } 2819 2820 ATF_TP_ADD_TCS(tp) 2821 { 2822 /* Transmit tests */ 2823 AES_CBC_TESTS(ADD_TRANSMIT_TESTS); 2824 AES_GCM_TESTS(ADD_TRANSMIT_TESTS); 2825 CHACHA20_TESTS(ADD_TRANSMIT_TESTS); 2826 AES_CBC_TESTS(ADD_TRANSMIT_PADDING_TESTS); 2827 AES_CBC_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST); 2828 AES_GCM_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST); 2829 CHACHA20_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST); 2830 INVALID_CIPHER_SUITES(ADD_INVALID_TRANSMIT_TEST); 2831 2832 /* Receive tests */ 2833 TLS_10_TESTS(ADD_UNSUPPORTED_RECEIVE_TEST); 2834 AES_CBC_NONZERO_TESTS(ADD_RECEIVE_TESTS); 2835 AES_GCM_TESTS(ADD_RECEIVE_TESTS); 2836 CHACHA20_TESTS(ADD_RECEIVE_TESTS); 2837 AES_CBC_NONZERO_TESTS(ADD_RECEIVE_MTE_TESTS); 2838 AES_CBC_NONZERO_TESTS(ADD_RECEIVE_EXPLICIT_IV_TESTS); 2839 AES_GCM_12_TESTS(ADD_RECEIVE_EXPLICIT_IV_TESTS); 2840 TLS_13_TESTS(ADD_RECEIVE_TLS13_TESTS); 2841 INVALID_CIPHER_SUITES(ADD_INVALID_RECEIVE_TEST); 2842 2843 /* Miscellaneous */ 2844 ATF_TP_ADD_TC(tp, ktls_sendto_baddst); 2845 ATF_TP_ADD_TC(tp, ktls_listening_socket); 2846 2847 return (atf_no_error()); 2848 } 2849