/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Netflix Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/event.h>
#include <sys/ktls.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <crypto/cryptodev.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <netdb.h>
#include <poll.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <atf-c.h>

#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

static void
require_ktls(void)
{
	size_t len;
	bool enable;

	len = sizeof(enable);
	if (sysctlbyname("kern.ipc.tls.enable", &enable, &len, NULL, 0) == -1) {
		if (errno == ENOENT)
			atf_tc_skip("kernel does not support TLS offload");
		atf_libc_error(errno, "Failed to read kern.ipc.tls.enable");
	}

	if (!enable)
		atf_tc_skip("Kernel TLS is disabled");
}

#define	ATF_REQUIRE_KTLS()	require_ktls()

static void
check_tls_mode(const atf_tc_t *tc, int s, int sockopt)
{
	if (atf_tc_get_config_var_as_bool_wd(tc, "ktls.require_ifnet", false)) {
		socklen_t len;
		int mode;

		len = sizeof(mode);
		if (getsockopt(s, IPPROTO_TCP, sockopt, &mode, &len) == -1)
			atf_libc_error(errno, "Failed to fetch TLS mode");

		if (mode != TCP_TLS_MODE_IFNET)
			atf_tc_skip("connection did not use ifnet TLS");
	}

	if (atf_tc_get_config_var_as_bool_wd(tc, "ktls.require_toe", false)) {
		socklen_t len;
		int mode;

		len = sizeof(mode);
		if (getsockopt(s, IPPROTO_TCP, sockopt, &mode, &len) == -1)
			atf_libc_error(errno, "Failed to fetch TLS mode");

		if (mode != TCP_TLS_MODE_TOE)
			atf_tc_skip("connection did not use TOE TLS");
	}
}
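
/*
 * Debug helpers.  Optional test configuration variables referenced
 * throughout this file:
 *
 *   ktls.debug          - dump records and crypto material as the tests run
 *   ktls.host/ktls.port - connect to a remote echo server instead of using
 *                         a local loopback socket pair
 *   ktls.require_ifnet  - skip unless the connection uses ifnet (NIC) TLS
 *   ktls.require_toe    - skip unless the connection uses TOE TLS
 */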
static void __printflike(2, 3)
debug(const atf_tc_t *tc, const char *fmt, ...)
{
	if (!atf_tc_get_config_var_as_bool_wd(tc, "ktls.debug", false))
		return;

	va_list ap;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

static void
debug_hexdump(const atf_tc_t *tc, const void *buf, int length,
    const char *label)
{
	if (!atf_tc_get_config_var_as_bool_wd(tc, "ktls.debug", false))
		return;

	if (label != NULL)
		printf("%s:\n", label);
	hexdump(buf, length, NULL, 0);
}

static char
rdigit(void)
{
	/* ASCII printable values between 0x20 and 0x7e */
	return (0x20 + random() % (0x7f - 0x20));
}

static char *
alloc_buffer(size_t len)
{
	char *buf;
	size_t i;

	if (len == 0)
		return (NULL);
	buf = malloc(len);
	for (i = 0; i < len; i++)
		buf[i] = rdigit();
	return (buf);
}
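
/*
 * Socket setup helpers.  socketpair_tcp() creates a pair of connected,
 * non-blocking TCP sockets over loopback.  echo_socket() instead connects
 * to the echo server named by "ktls.host"/"ktls.port", in which case both
 * halves of the "pair" refer to the same socket.  open_sockets() selects
 * between the two.
 */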
static bool
socketpair_tcp(int sv[2])
{
	struct pollfd pfd;
	struct sockaddr_in sin;
	socklen_t len;
	int as, cs, ls;

	ls = socket(PF_INET, SOCK_STREAM, 0);
	if (ls == -1) {
		warn("socket() for listen");
		return (false);
	}

	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	if (bind(ls, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
		warn("bind");
		close(ls);
		return (false);
	}

	if (listen(ls, 1) == -1) {
		warn("listen");
		close(ls);
		return (false);
	}

	len = sizeof(sin);
	if (getsockname(ls, (struct sockaddr *)&sin, &len) == -1) {
		warn("getsockname");
		close(ls);
		return (false);
	}

	cs = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
	if (cs == -1) {
		warn("socket() for connect");
		close(ls);
		return (false);
	}

	if (connect(cs, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
		if (errno != EINPROGRESS) {
			warn("connect");
			close(ls);
			close(cs);
			return (false);
		}
	}

	as = accept4(ls, NULL, NULL, SOCK_NONBLOCK);
	if (as == -1) {
		warn("accept4");
		close(ls);
		close(cs);
		return (false);
	}

	close(ls);

	pfd.fd = cs;
	pfd.events = POLLOUT;
	pfd.revents = 0;
	ATF_REQUIRE_INTEQ(1, poll(&pfd, 1, INFTIM));
	ATF_REQUIRE_INTEQ(POLLOUT, pfd.revents);

	sv[0] = cs;
	sv[1] = as;
	return (true);
}

static bool
echo_socket(const atf_tc_t *tc, int sv[2])
{
	const char *cause, *host, *port;
	struct addrinfo hints, *ai, *tofree;
	int error, flags, s;

	host = atf_tc_get_config_var(tc, "ktls.host");
	port = atf_tc_get_config_var_wd(tc, "ktls.port", "echo");
	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_protocol = IPPROTO_TCP;
	error = getaddrinfo(host, port, &hints, &tofree);
	if (error != 0) {
		warnx("getaddrinfo(%s:%s) failed: %s", host, port,
		    gai_strerror(error));
		return (false);
	}

	cause = NULL;
	for (ai = tofree; ai != NULL; ai = ai->ai_next) {
		s = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
		if (s == -1) {
			cause = "socket";
			error = errno;
			continue;
		}

		if (connect(s, ai->ai_addr, ai->ai_addrlen) == -1) {
			cause = "connect";
			error = errno;
			close(s);
			continue;
		}

		freeaddrinfo(tofree);

		ATF_REQUIRE((flags = fcntl(s, F_GETFL)) != -1);
		flags |= O_NONBLOCK;
		ATF_REQUIRE(fcntl(s, F_SETFL, flags) != -1);

		sv[0] = s;
		sv[1] = s;
		return (true);
	}

	warnc(error, "%s", cause);
	freeaddrinfo(tofree);
	return (false);
}

static bool
open_sockets(const atf_tc_t *tc, int sv[2])
{
	if (atf_tc_has_config_var(tc, "ktls.host"))
		return (echo_socket(tc, sv));
	else
		return (socketpair_tcp(sv));
}

static void
close_sockets(int sv[2])
{
	if (sv[0] != sv[1])
		ATF_REQUIRE(close(sv[1]) == 0);
	ATF_REQUIRE(close(sv[0]) == 0);
}

static void
close_sockets_ignore_errors(int sv[2])
{
	if (sv[0] != sv[1])
		close(sv[1]);
	close(sv[0]);
}

static void
fd_set_blocking(int fd)
{
	int flags;

	ATF_REQUIRE((flags = fcntl(fd, F_GETFL)) != -1);
	flags &= ~O_NONBLOCK;
	ATF_REQUIRE(fcntl(fd, F_SETFL, flags) != -1);
}

static bool
cbc_crypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
    const char *input, char *output, size_t size, int enc)
{
	EVP_CIPHER_CTX *ctx;
	int outl, total;

	ctx = EVP_CIPHER_CTX_new();
	if (ctx == NULL) {
		warnx("EVP_CIPHER_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (EVP_CipherInit_ex(ctx, cipher, NULL, (const u_char *)key,
	    (const u_char *)iv, enc) != 1) {
		warnx("EVP_CipherInit_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_set_padding(ctx, 0);
	if (EVP_CipherUpdate(ctx, (u_char *)output, &outl,
	    (const u_char *)input, size) != 1) {
		warnx("EVP_CipherUpdate failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total = outl;
	if (EVP_CipherFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
		warnx("EVP_CipherFinal_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total += outl;
	if ((size_t)total != size) {
		warnx("decrypt size mismatch: %zu vs %d", size, total);
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_free(ctx);
	return (true);
}

static bool
cbc_encrypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
    const char *input, char *output, size_t size)
{
	return (cbc_crypt(cipher, key, iv, input, output, size, 1));
}

static bool
cbc_decrypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
    const char *input, char *output, size_t size)
{
	return (cbc_crypt(cipher, key, iv, input, output, size, 0));
}

static bool
compute_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad,
    size_t aad_len, const void *buffer, size_t len, void *digest,
    u_int *digest_len)
{
	HMAC_CTX *ctx;

	ctx = HMAC_CTX_new();
	if (ctx == NULL) {
		warnx("HMAC_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (HMAC_Init_ex(ctx, key, key_len, md, NULL) != 1) {
		warnx("HMAC_Init_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	if (HMAC_Update(ctx, aad, aad_len) != 1) {
		warnx("HMAC_Update (aad) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	if (HMAC_Update(ctx, buffer, len) != 1) {
		warnx("HMAC_Update (payload) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	if (HMAC_Final(ctx, digest, digest_len) != 1) {
		warnx("HMAC_Final failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	HMAC_CTX_free(ctx);
	return (true);
}

static bool
verify_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad,
    size_t aad_len, const void *buffer, size_t len, const void *digest)
{
	unsigned char digest2[EVP_MAX_MD_SIZE];
	u_int digest_len;

	if (!compute_hash(md, key, key_len, aad, aad_len, buffer, len, digest2,
	    &digest_len))
		return (false);
	if (memcmp(digest, digest2, digest_len) != 0) {
		warnx("HMAC mismatch");
		return (false);
	}
	return (true);
}
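
/*
 * AEAD (AES-GCM and ChaCha20-Poly1305) helpers: encrypt or decrypt a
 * buffer with the supplied nonce and additional authenticated data,
 * producing or verifying the authentication tag.
 */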
static bool
aead_encrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
    const void *aad, size_t aad_len, const char *input, char *output,
    size_t size, char *tag, size_t tag_len)
{
	EVP_CIPHER_CTX *ctx;
	int outl, total;

	ctx = EVP_CIPHER_CTX_new();
	if (ctx == NULL) {
		warnx("EVP_CIPHER_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (EVP_EncryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
	    (const u_char *)nonce) != 1) {
		warnx("EVP_EncryptInit_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_set_padding(ctx, 0);
	if (aad != NULL) {
		if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
		    aad_len) != 1) {
			warnx("EVP_EncryptUpdate for AAD failed: %s",
			    ERR_error_string(ERR_get_error(), NULL));
			EVP_CIPHER_CTX_free(ctx);
			return (false);
		}
	}
	if (EVP_EncryptUpdate(ctx, (u_char *)output, &outl,
	    (const u_char *)input, size) != 1) {
		warnx("EVP_EncryptUpdate failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total = outl;
	if (EVP_EncryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
		warnx("EVP_EncryptFinal_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total += outl;
	if ((size_t)total != size) {
		warnx("encrypt size mismatch: %zu vs %d", size, total);
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tag_len, tag) !=
	    1) {
		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_free(ctx);
	return (true);
}

static bool
aead_decrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
    const void *aad, size_t aad_len, const char *input, char *output,
    size_t size, const char *tag, size_t tag_len)
{
	EVP_CIPHER_CTX *ctx;
	int outl, total;
	bool valid;

	ctx = EVP_CIPHER_CTX_new();
	if (ctx == NULL) {
		warnx("EVP_CIPHER_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (EVP_DecryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
	    (const u_char *)nonce) != 1) {
		warnx("EVP_DecryptInit_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_set_padding(ctx, 0);
	if (aad != NULL) {
		if (EVP_DecryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
		    aad_len) != 1) {
			warnx("EVP_DecryptUpdate for AAD failed: %s",
			    ERR_error_string(ERR_get_error(), NULL));
			EVP_CIPHER_CTX_free(ctx);
			return (false);
		}
	}
	if (EVP_DecryptUpdate(ctx, (u_char *)output, &outl,
	    (const u_char *)input, size) != 1) {
		warnx("EVP_DecryptUpdate failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total = outl;
	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tag_len,
	    __DECONST(char *, tag)) != 1) {
		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	valid = (EVP_DecryptFinal_ex(ctx, (u_char *)output + outl, &outl) == 1);
	total += outl;
	if ((size_t)total != size) {
		warnx("decrypt size mismatch: %zu vs %d", size, total);
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	if (!valid)
		warnx("tag mismatch");
	EVP_CIPHER_CTX_free(ctx);
	return (valid);
}
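
/*
 * Populate a struct tls_enable for the requested cipher suite and TLS
 * minor version using randomly generated keys and IVs and the given
 * starting record sequence number.  The caller releases the keys with
 * free_tls_enable().
 */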
static void
build_tls_enable(const atf_tc_t *tc, int cipher_alg, size_t cipher_key_len,
    int auth_alg, int minor, uint64_t seqno, struct tls_enable *en)
{
	u_int auth_key_len, iv_len;

	memset(en, 0, sizeof(*en));

	switch (cipher_alg) {
	case CRYPTO_AES_CBC:
		if (minor == TLS_MINOR_VER_ZERO)
			iv_len = AES_BLOCK_LEN;
		else
			iv_len = 0;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (minor == TLS_MINOR_VER_TWO)
			iv_len = TLS_AEAD_GCM_LEN;
		else
			iv_len = TLS_1_3_GCM_IV_LEN;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		iv_len = TLS_CHACHA20_IV_LEN;
		break;
	default:
		iv_len = 0;
		break;
	}
	switch (auth_alg) {
	case CRYPTO_SHA1_HMAC:
		auth_key_len = SHA1_HASH_LEN;
		break;
	case CRYPTO_SHA2_256_HMAC:
		auth_key_len = SHA2_256_HASH_LEN;
		break;
	case CRYPTO_SHA2_384_HMAC:
		auth_key_len = SHA2_384_HASH_LEN;
		break;
	default:
		auth_key_len = 0;
		break;
	}
	en->cipher_key = alloc_buffer(cipher_key_len);
	debug_hexdump(tc, en->cipher_key, cipher_key_len, "cipher key");
	en->iv = alloc_buffer(iv_len);
	if (iv_len != 0)
		debug_hexdump(tc, en->iv, iv_len, "iv");
	en->auth_key = alloc_buffer(auth_key_len);
	if (auth_key_len != 0)
		debug_hexdump(tc, en->auth_key, auth_key_len, "auth key");
	en->cipher_algorithm = cipher_alg;
	en->cipher_key_len = cipher_key_len;
	en->iv_len = iv_len;
	en->auth_algorithm = auth_alg;
	en->auth_key_len = auth_key_len;
	en->tls_vmajor = TLS_MAJOR_VER_ONE;
	en->tls_vminor = minor;
	be64enc(en->rec_seq, seqno);
	debug(tc, "seqno: %ju\n", (uintmax_t)seqno);
}

static void
free_tls_enable(struct tls_enable *en)
{
	free(__DECONST(void *, en->cipher_key));
	free(__DECONST(void *, en->iv));
	free(__DECONST(void *, en->auth_key));
}

static const EVP_CIPHER *
tls_EVP_CIPHER(const struct tls_enable *en)
{
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_CBC:
		switch (en->cipher_key_len) {
		case 128 / 8:
			return (EVP_aes_128_cbc());
		case 256 / 8:
			return (EVP_aes_256_cbc());
		default:
			return (NULL);
		}
		break;
	case CRYPTO_AES_NIST_GCM_16:
		switch (en->cipher_key_len) {
		case 128 / 8:
			return (EVP_aes_128_gcm());
		case 256 / 8:
			return (EVP_aes_256_gcm());
		default:
			return (NULL);
		}
		break;
	case CRYPTO_CHACHA20_POLY1305:
		return (EVP_chacha20_poly1305());
	default:
		return (NULL);
	}
}

static const EVP_MD *
tls_EVP_MD(const struct tls_enable *en)
{
	switch (en->auth_algorithm) {
	case CRYPTO_SHA1_HMAC:
		return (EVP_sha1());
	case CRYPTO_SHA2_256_HMAC:
		return (EVP_sha256());
	case CRYPTO_SHA2_384_HMAC:
		return (EVP_sha384());
	default:
		return (NULL);
	}
}
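
/*
 * Record layout helpers: per-cipher-suite lengths of the TLS record
 * header (including any explicit IV or nonce), the MAC or AEAD tag, and
 * the maximum trailer (tag plus worst-case CBC padding plus the TLS 1.3
 * record type byte).
 */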
static size_t
tls_header_len(struct tls_enable *en)
{
	size_t len;

	len = sizeof(struct tls_record_layer);
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_CBC:
		if (en->tls_vminor != TLS_MINOR_VER_ZERO)
			len += AES_BLOCK_LEN;
		return (len);
	case CRYPTO_AES_NIST_GCM_16:
		if (en->tls_vminor == TLS_MINOR_VER_TWO)
			len += sizeof(uint64_t);
		return (len);
	case CRYPTO_CHACHA20_POLY1305:
		return (len);
	default:
		return (0);
	}
}

static size_t
tls_mac_len(struct tls_enable *en)
{
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			return (SHA1_HASH_LEN);
		case CRYPTO_SHA2_256_HMAC:
			return (SHA2_256_HASH_LEN);
		case CRYPTO_SHA2_384_HMAC:
			return (SHA2_384_HASH_LEN);
		default:
			return (0);
		}
	case CRYPTO_AES_NIST_GCM_16:
		return (AES_GMAC_HASH_LEN);
	case CRYPTO_CHACHA20_POLY1305:
		return (POLY1305_HASH_LEN);
	default:
		return (0);
	}
}

/* Includes maximum padding for MTE. */
static size_t
tls_trailer_len(struct tls_enable *en)
{
	size_t len;

	len = tls_mac_len(en);
	if (en->cipher_algorithm == CRYPTO_AES_CBC)
		len += AES_BLOCK_LEN;
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		len++;
	return (len);
}

/* Minimum valid record payload size for a given cipher suite. */
static size_t
tls_minimum_record_payload(struct tls_enable *en)
{
	size_t len;

	len = tls_header_len(en);
	if (en->cipher_algorithm == CRYPTO_AES_CBC)
		len += roundup2(tls_mac_len(en) + 1, AES_BLOCK_LEN);
	else
		len += tls_mac_len(en);
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		len++;
	return (len - sizeof(struct tls_record_layer));
}

/* 'len' is the length of the payload application data. */
static void
tls_mte_aad(struct tls_enable *en, size_t len,
    const struct tls_record_layer *hdr, uint64_t seqno, struct tls_mac_data *ad)
{
	ad->seq = htobe64(seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(len);
}

static void
tls_12_aead_aad(struct tls_enable *en, size_t len,
    const struct tls_record_layer *hdr, uint64_t seqno,
    struct tls_aead_data *ad)
{
	ad->seq = htobe64(seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(len);
}

static void
tls_13_aad(struct tls_enable *en, const struct tls_record_layer *hdr,
    uint64_t seqno, struct tls_aead_data_13 *ad)
{
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = hdr->tls_length;
}
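
/*
 * Nonce construction.  For TLS 1.2 AES-GCM the 12-byte nonce is the
 * 4-byte implicit salt from the handshake followed by the 8-byte explicit
 * nonce carried after the record header.  For TLS 1.3 (and for
 * ChaCha20-Poly1305 in TLS 1.2) the record sequence number is XORed into
 * the low 8 bytes of the implicit IV.
 */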
static void
tls_12_gcm_nonce(struct tls_enable *en, const struct tls_record_layer *hdr,
    char *nonce)
{
	memcpy(nonce, en->iv, TLS_AEAD_GCM_LEN);
	memcpy(nonce + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
}

static void
tls_13_nonce(struct tls_enable *en, uint64_t seqno, char *nonce)
{
	static_assert(TLS_1_3_GCM_IV_LEN == TLS_CHACHA20_IV_LEN,
	    "TLS 1.3 nonce length mismatch");
	memcpy(nonce, en->iv, TLS_1_3_GCM_IV_LEN);
	*(uint64_t *)(nonce + 4) ^= htobe64(seqno);
}

/*
 * Decrypt a TLS record 'len' bytes long at 'src' and store the result at
 * 'dst'.  If the TLS record header length doesn't match or 'dst' doesn't
 * have sufficient room ('avail'), fail the test.
 */
static size_t
decrypt_tls_aes_cbc_mte(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, const void *src, size_t len, void *dst, size_t avail,
    uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	struct tls_mac_data aad;
	const char *iv;
	char *buf;
	size_t hdr_len, mac_len, payload_len;
	int padding;

	hdr = src;
	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	ATF_REQUIRE_INTEQ(TLS_MAJOR_VER_ONE, hdr->tls_vmajor);
	ATF_REQUIRE_INTEQ(en->tls_vminor, hdr->tls_vminor);
	debug(tc, "decrypting MTE record seqno %ju:\n", (uintmax_t)seqno);
	debug_hexdump(tc, src, len, NULL);

	/* First, decrypt the outer payload into a temporary buffer. */
	payload_len = len - hdr_len;
	buf = malloc(payload_len);
	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
		iv = en->iv;
	else
		iv = (void *)(hdr + 1);
	debug_hexdump(tc, iv, AES_BLOCK_LEN, "iv");
	ATF_REQUIRE(cbc_decrypt(tls_EVP_CIPHER(en), en->cipher_key, iv,
	    (const u_char *)src + hdr_len, buf, payload_len));
	debug_hexdump(tc, buf, payload_len, "decrypted buffer");

	/*
	 * Copy the last encrypted block to use as the IV for the next
	 * record for TLS 1.0.
	 */
	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
		memcpy(__DECONST(uint8_t *, en->iv), (const u_char *)src +
		    (len - AES_BLOCK_LEN), AES_BLOCK_LEN);

	/*
	 * Verify trailing padding and strip.
	 *
	 * The kernel always generates the smallest amount of padding.
	 */
	padding = buf[payload_len - 1] + 1;
	ATF_REQUIRE_MSG(padding > 0 && padding <= AES_BLOCK_LEN,
	    "invalid padding %d", padding);
	ATF_REQUIRE_MSG(payload_len >= mac_len + padding,
	    "payload_len (%zu) < mac_len (%zu) + padding (%d)", payload_len,
	    mac_len, padding);
	payload_len -= padding;

	/* Verify HMAC. */
	payload_len -= mac_len;
	tls_mte_aad(en, payload_len, hdr, seqno, &aad);
	debug_hexdump(tc, &aad, sizeof(aad), "aad");
	ATF_REQUIRE(verify_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len,
	    &aad, sizeof(aad), buf, payload_len, buf + payload_len));

	ATF_REQUIRE_MSG(payload_len <= avail, "payload_len (%zu) > avail (%zu)",
	    payload_len, avail);
	memcpy(dst, buf, payload_len);
	*record_type = hdr->tls_type;
	return (payload_len);
}

static size_t
decrypt_tls_12_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
    const void *src, size_t len, void *dst, uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	struct tls_aead_data aad;
	char nonce[12];
	size_t hdr_len, mac_len, payload_len;

	hdr = src;

	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	payload_len = len - (hdr_len + mac_len);
	ATF_REQUIRE_INTEQ(TLS_MAJOR_VER_ONE, hdr->tls_vmajor);
	ATF_REQUIRE_INTEQ(TLS_MINOR_VER_TWO, hdr->tls_vminor);
	debug(tc, "decrypting TLS 1.2 record seqno %ju:\n", (uintmax_t)seqno);
	debug_hexdump(tc, src, len, NULL);

	tls_12_aead_aad(en, payload_len, hdr, seqno, &aad);
	debug_hexdump(tc, &aad, sizeof(aad), "aad");
	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_12_gcm_nonce(en, hdr, nonce);
	else
		tls_13_nonce(en, seqno, nonce);
	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");

	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), (const char *)src + hdr_len, dst, payload_len,
	    (const char *)src + hdr_len + payload_len, mac_len));

	*record_type = hdr->tls_type;
	return (payload_len);
}

static size_t
decrypt_tls_13_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
    const void *src, size_t len, void *dst, uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	struct tls_aead_data_13 aad;
	char nonce[12];
	char *buf;
	size_t hdr_len, mac_len, payload_len;

	hdr = src;

	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	payload_len = len - (hdr_len + mac_len);
	ATF_REQUIRE_MSG(payload_len >= 1,
	    "payload_len (%zu) too short: len %zu hdr_len %zu mac_len %zu",
	    payload_len, len, hdr_len, mac_len);
	ATF_REQUIRE_INTEQ(TLS_RLTYPE_APP, hdr->tls_type);
	ATF_REQUIRE_INTEQ(TLS_MAJOR_VER_ONE, hdr->tls_vmajor);
	ATF_REQUIRE_INTEQ(TLS_MINOR_VER_TWO, hdr->tls_vminor);
	debug(tc, "decrypting TLS 1.3 record seqno %ju:\n", (uintmax_t)seqno);
	debug_hexdump(tc, src, len, NULL);

	tls_13_aad(en, hdr, seqno, &aad);
	debug_hexdump(tc, &aad, sizeof(aad), "aad");
	tls_13_nonce(en, seqno, nonce);
	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");

	/*
	 * Have to use a temporary buffer for the output due to the
	 * record type as the last byte of the trailer.
	 */
	buf = malloc(payload_len);

	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), (const char *)src + hdr_len, buf, payload_len,
	    (const char *)src + hdr_len + payload_len, mac_len));
	debug_hexdump(tc, buf, payload_len, "decrypted buffer");

	/* Trim record type. */
	*record_type = buf[payload_len - 1];
	payload_len--;

	memcpy(dst, buf, payload_len);
	free(buf);

	return (payload_len);
}

static size_t
decrypt_tls_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
    const void *src, size_t len, void *dst, size_t avail, uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	size_t payload_len;

	hdr = src;
	ATF_REQUIRE_INTEQ(len, ntohs(hdr->tls_length) + sizeof(*hdr));

	payload_len = len - (tls_header_len(en) + tls_trailer_len(en));
	ATF_REQUIRE_MSG(payload_len <= avail, "payload_len (%zu) > avail (%zu)",
	    payload_len, avail);

	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
		ATF_REQUIRE_INTEQ(payload_len, decrypt_tls_12_aead(tc, en,
		    seqno, src, len, dst, record_type));
	} else {
		ATF_REQUIRE_INTEQ(payload_len, decrypt_tls_13_aead(tc, en,
		    seqno, src, len, dst, record_type));
	}

	return (payload_len);
}

static size_t
decrypt_tls_record(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
    const void *src, size_t len, void *dst, size_t avail, uint8_t *record_type)
{
	if (en->cipher_algorithm == CRYPTO_AES_CBC)
		return (decrypt_tls_aes_cbc_mte(tc, en, seqno, src, len, dst,
		    avail, record_type));
	else
		return (decrypt_tls_aead(tc, en, seqno, src, len, dst,
		    avail, record_type));
}

/*
 * Encrypt a TLS record of type 'record_type' with payload 'len' bytes
 * long at 'src' and store the result at 'dst'.  If 'dst' doesn't have
 * sufficient room ('avail'), fail the test.  'padding' is the amount
 * of additional padding to include beyond any amount mandated by the
 * cipher suite.
 */
static size_t
encrypt_tls_aes_cbc_mte(const atf_tc_t *tc, struct tls_enable *en,
    uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
    size_t avail, size_t padding)
{
	struct tls_record_layer *hdr;
	struct tls_mac_data aad;
	char *buf, *iv;
	size_t hdr_len, mac_len, record_len;
	u_int digest_len, i;

	ATF_REQUIRE_INTEQ(0, padding % 16);

	hdr = dst;
	buf = dst;

	debug(tc, "encrypting MTE record seqno %ju:\n", (uintmax_t)seqno);
	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	padding += (AES_BLOCK_LEN - (len + mac_len) % AES_BLOCK_LEN);
	ATF_REQUIRE_MSG(padding > 0 && padding <= 255, "invalid padding (%zu)",
	    padding);

	record_len = hdr_len + len + mac_len + padding;
	ATF_REQUIRE_MSG(record_len <= avail, "record_len (%zu) > avail (%zu): "
	    "hdr_len %zu, len %zu, mac_len %zu, padding %zu", record_len,
	    avail, hdr_len, len, mac_len, padding);

	hdr->tls_type = record_type;
	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
	hdr->tls_vminor = en->tls_vminor;
	hdr->tls_length = htons(record_len - sizeof(*hdr));
	iv = (char *)(hdr + 1);
	for (i = 0; i < AES_BLOCK_LEN; i++)
		iv[i] = rdigit();
	debug_hexdump(tc, iv, AES_BLOCK_LEN, "explicit IV");

	/* Copy plaintext to ciphertext region. */
	memcpy(buf + hdr_len, src, len);

	/* Compute HMAC. */
	tls_mte_aad(en, len, hdr, seqno, &aad);
	debug_hexdump(tc, &aad, sizeof(aad), "aad");
	debug_hexdump(tc, src, len, "plaintext");
	ATF_REQUIRE(compute_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len,
	    &aad, sizeof(aad), src, len, buf + hdr_len + len, &digest_len));
	ATF_REQUIRE_INTEQ(mac_len, digest_len);

	/* Store padding. */
	for (i = 0; i < padding; i++)
		buf[hdr_len + len + mac_len + i] = padding - 1;
	debug_hexdump(tc, buf + hdr_len + len, mac_len + padding,
	    "MAC and padding");

	/* Encrypt the record. */
	ATF_REQUIRE(cbc_encrypt(tls_EVP_CIPHER(en), en->cipher_key, iv,
	    buf + hdr_len, buf + hdr_len, len + mac_len + padding));
	debug_hexdump(tc, dst, record_len, "encrypted record");

	return (record_len);
}

static size_t
encrypt_tls_12_aead(const atf_tc_t *tc, struct tls_enable *en,
    uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst)
{
	struct tls_record_layer *hdr;
	struct tls_aead_data aad;
	char nonce[12];
	size_t hdr_len, mac_len, record_len;

	hdr = dst;

	debug(tc, "encrypting TLS 1.2 record seqno %ju:\n", (uintmax_t)seqno);
	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	record_len = hdr_len + len + mac_len;

	hdr->tls_type = record_type;
	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
	hdr->tls_vminor = TLS_MINOR_VER_TWO;
	hdr->tls_length = htons(record_len - sizeof(*hdr));
	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		memcpy(hdr + 1, &seqno, sizeof(seqno));

	tls_12_aead_aad(en, len, hdr, seqno, &aad);
	debug_hexdump(tc, &aad, sizeof(aad), "aad");
	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_12_gcm_nonce(en, hdr, nonce);
	else
		tls_13_nonce(en, seqno, nonce);
	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");

	debug_hexdump(tc, src, len, "plaintext");
	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), src, (char *)dst + hdr_len, len,
	    (char *)dst + hdr_len + len, mac_len));
	debug_hexdump(tc, dst, record_len, "encrypted record");

	return (record_len);
}

static size_t
encrypt_tls_13_aead(const atf_tc_t *tc, struct tls_enable *en,
    uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
    size_t padding)
{
	struct tls_record_layer *hdr;
	struct tls_aead_data_13 aad;
	char nonce[12];
	char *buf;
	size_t hdr_len, mac_len, record_len;

	hdr = dst;

	debug(tc, "encrypting TLS 1.3 record seqno %ju:\n", (uintmax_t)seqno);
	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	record_len = hdr_len + len + 1 + padding + mac_len;

	hdr->tls_type = TLS_RLTYPE_APP;
	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
	hdr->tls_vminor = TLS_MINOR_VER_TWO;
	hdr->tls_length = htons(record_len - sizeof(*hdr));

	tls_13_aad(en, hdr, seqno, &aad);
	debug_hexdump(tc, &aad, sizeof(aad), "aad");
	tls_13_nonce(en, seqno, nonce);
	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");

	/*
	 * Have to use a temporary buffer for the input so that the record
	 * type can be appended.
	 */
	buf = malloc(len + 1 + padding);
	memcpy(buf, src, len);
	buf[len] = record_type;
	memset(buf + len + 1, 0, padding);
	debug_hexdump(tc, buf, len + 1 + padding, "plaintext + type + padding");

	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), buf, (char *)dst + hdr_len, len + 1 + padding,
	    (char *)dst + hdr_len + len + 1 + padding, mac_len));
	debug_hexdump(tc, dst, record_len, "encrypted record");

	free(buf);

	return (record_len);
}

static size_t
encrypt_tls_aead(const atf_tc_t *tc, struct tls_enable *en,
    uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
    size_t avail, size_t padding)
{
	size_t record_len;

	record_len = tls_header_len(en) + len + padding + tls_trailer_len(en);
	ATF_REQUIRE_MSG(record_len <= avail, "record_len (%zu) > avail (%zu): "
	    "header %zu len %zu padding %zu trailer %zu", record_len, avail,
	    tls_header_len(en), len, padding, tls_trailer_len(en));

	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
		ATF_REQUIRE_INTEQ(0, padding);
		ATF_REQUIRE_INTEQ(record_len, encrypt_tls_12_aead(tc, en,
		    record_type, seqno, src, len, dst));
	} else
		ATF_REQUIRE_INTEQ(record_len, encrypt_tls_13_aead(tc, en,
		    record_type, seqno, src, len, dst, padding));

	return (record_len);
}

static size_t
encrypt_tls_record(const atf_tc_t *tc, struct tls_enable *en,
    uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
    size_t avail, size_t padding)
{
	if (en->cipher_algorithm == CRYPTO_AES_CBC)
		return (encrypt_tls_aes_cbc_mte(tc, en, record_type, seqno, src,
		    len, dst, avail, padding));
	else
		return (encrypt_tls_aead(tc, en, record_type, seqno, src, len,
		    dst, avail, padding));
}
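
/*
 * Transmit tests: enable kernel TLS for transmit on one end of a
 * connection, write plaintext into the socket, and verify that the peer
 * reads back correctly framed TLS records which decrypt to the original
 * data.  The kqueue loop below interleaves non-blocking writes of
 * plaintext with reads of the resulting records.
 */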
static void
test_ktls_transmit_app_data(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	struct kevent ev;
	struct tls_record_layer *hdr;
	char *plaintext, *decrypted, *outbuf;
	size_t decrypted_len, outbuf_len, outbuf_cap, record_len, written;
	ssize_t rv;
	int kq, sockets[2];
	uint8_t record_type;

	plaintext = alloc_buffer(len);
	debug_hexdump(tc, plaintext, len, "plaintext");
	decrypted = malloc(len);
	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
	    tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE((kq = kqueue()) != -1);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);

	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);

	decrypted_len = 0;
	outbuf_len = 0;
	written = 0;

	while (decrypted_len != len) {
		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);

		switch (ev.filter) {
		case EVFILT_WRITE:
			/* Try to write any remaining data. */
			rv = write(ev.ident, plaintext + written,
			    len - written);
			ATF_REQUIRE_MSG(rv > 0,
			    "failed to write to socket");
			written += rv;
			if (written == len) {
				ev.flags = EV_DISABLE;
				ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
				    NULL) == 0);
			}
			break;

		case EVFILT_READ:
			ATF_REQUIRE((ev.flags & EV_EOF) == 0);

			/*
			 * Try to read data for the next TLS record
			 * into outbuf.  Start by reading the header
			 * to determine how much additional data to
			 * read.
			 */
			if (outbuf_len < sizeof(struct tls_record_layer)) {
				rv = read(ev.ident, outbuf + outbuf_len,
				    sizeof(struct tls_record_layer) -
				    outbuf_len);
				ATF_REQUIRE_MSG(rv > 0,
				    "failed to read from socket");
				outbuf_len += rv;

				if (outbuf_len ==
				    sizeof(struct tls_record_layer)) {
					debug(tc, "TLS header for seqno %ju:\n",
					    (uintmax_t)seqno);
					debug_hexdump(tc, outbuf, outbuf_len,
					    NULL);
				}
			}

			if (outbuf_len < sizeof(struct tls_record_layer))
				break;

			record_len = sizeof(struct tls_record_layer) +
			    ntohs(hdr->tls_length);
			debug(tc, "record_len %zu outbuf_cap %zu\n",
			    record_len, outbuf_cap);
			ATF_REQUIRE(record_len <= outbuf_cap);
			ATF_REQUIRE(record_len > outbuf_len);
			rv = read(ev.ident, outbuf + outbuf_len,
			    record_len - outbuf_len);
			if (rv == -1 && errno == EAGAIN)
				break;
			ATF_REQUIRE_MSG(rv > 0,
			    "failed to read from socket: %s", strerror(errno));

			outbuf_len += rv;
			if (outbuf_len == record_len) {
				decrypted_len += decrypt_tls_record(tc, en,
				    seqno, outbuf, outbuf_len,
				    decrypted + decrypted_len,
				    len - decrypted_len, &record_type);
				ATF_REQUIRE_INTEQ(TLS_RLTYPE_APP, record_type);

				seqno++;
				outbuf_len = 0;
			}
			break;
		}
	}

	ATF_REQUIRE_MSG(written == decrypted_len,
	    "read %zu decrypted bytes, but wrote %zu", decrypted_len, written);

	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);

	free(outbuf);
	free(decrypted);
	free(plaintext);

	close_sockets(sockets);
	ATF_REQUIRE(close(kq) == 0);
}
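
/*
 * Send a TLS record with an explicit record type (e.g. an alert or
 * handshake message) by attaching a TLS_SET_RECORD_TYPE control message
 * to sendmsg().  Kernel TLS transmits the payload as a single record of
 * the requested type.
 */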
static void
ktls_send_control_message(int fd, uint8_t type, void *data, size_t len)
{
	struct msghdr msg;
	struct cmsghdr *cmsg;
	char cbuf[CMSG_SPACE(sizeof(type))];
	struct iovec iov;

	memset(&msg, 0, sizeof(msg));

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_TCP;
	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
	cmsg->cmsg_len = CMSG_LEN(sizeof(type));
	*(uint8_t *)CMSG_DATA(cmsg) = type;

	iov.iov_base = data;
	iov.iov_len = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	ATF_REQUIRE_INTEQ((ssize_t)len, sendmsg(fd, &msg, 0));
}

static void
test_ktls_transmit_control(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, uint8_t type, size_t len)
{
	struct tls_record_layer *hdr;
	char *plaintext, *decrypted, *outbuf;
	size_t outbuf_cap, payload_len, record_len;
	ssize_t rv;
	int sockets[2];
	uint8_t record_type;

	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);

	plaintext = alloc_buffer(len);
	decrypted = malloc(len);
	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	ktls_send_control_message(sockets[1], type, plaintext, len);

	/*
	 * First read the header to determine how much additional data
	 * to read.
	 */
	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
	ATF_REQUIRE_INTEQ(sizeof(struct tls_record_layer), rv);
	payload_len = ntohs(hdr->tls_length);
	record_len = payload_len + sizeof(struct tls_record_layer);
	ATF_REQUIRE_MSG(record_len <= outbuf_cap,
	    "record_len (%zu) > outbuf_cap (%zu)", record_len, outbuf_cap);
	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
	    payload_len);
	ATF_REQUIRE_INTEQ((ssize_t)payload_len, rv);

	rv = decrypt_tls_record(tc, en, seqno, outbuf, record_len, decrypted,
	    len, &record_type);

	ATF_REQUIRE_MSG((ssize_t)len == rv,
	    "read %zd decrypted bytes, but wrote %zu", rv, len);
	ATF_REQUIRE_INTEQ(type, record_type);

	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);

	free(outbuf);
	free(decrypted);
	free(plaintext);

	close_sockets(sockets);
}

static void
test_ktls_transmit_empty_fragment(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno)
{
	struct tls_record_layer *hdr;
	char *outbuf;
	size_t outbuf_cap, payload_len, record_len;
	ssize_t rv;
	int sockets[2];
	uint8_t record_type;

	outbuf_cap = tls_header_len(en) + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	/*
	 * A write of zero bytes should send an empty fragment only for
	 * TLS 1.0, otherwise an error should be raised.
	 */
	rv = write(sockets[1], NULL, 0);
	if (rv == 0) {
		ATF_REQUIRE_INTEQ(CRYPTO_AES_CBC, en->cipher_algorithm);
		ATF_REQUIRE_INTEQ(TLS_MINOR_VER_ZERO, en->tls_vminor);
	} else {
		ATF_REQUIRE_INTEQ(-1, rv);
		ATF_REQUIRE_ERRNO(EINVAL, true);
		goto out;
	}

	/*
	 * First read the header to determine how much additional data
	 * to read.
	 */
	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
	ATF_REQUIRE_INTEQ(sizeof(struct tls_record_layer), rv);
	payload_len = ntohs(hdr->tls_length);
	record_len = payload_len + sizeof(struct tls_record_layer);
	ATF_REQUIRE_MSG(record_len <= outbuf_cap,
	    "record_len (%zu) > outbuf_cap (%zu)", record_len, outbuf_cap);
	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
	    payload_len);
	ATF_REQUIRE_INTEQ((ssize_t)payload_len, rv);

	rv = decrypt_tls_record(tc, en, seqno, outbuf, record_len, NULL, 0,
	    &record_type);

	ATF_REQUIRE_MSG(rv == 0,
	    "read %zd decrypted bytes for an empty fragment", rv);
	ATF_REQUIRE_INTEQ(TLS_RLTYPE_APP, record_type);

out:
	free(outbuf);

	close_sockets(sockets);
}
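
/*
 * Receive a single TLS record via recvmsg() on a socket with receive
 * kernel TLS enabled, validating the TLS_GET_RECORD control message that
 * describes the decrypted record.  Returns the number of payload bytes
 * received.
 */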
static size_t
ktls_receive_tls_record(struct tls_enable *en, int fd, uint8_t record_type,
    void *data, size_t len)
{
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct tls_get_record *tgr;
	char cbuf[CMSG_SPACE(sizeof(*tgr))];
	struct iovec iov;
	ssize_t rv;

	memset(&msg, 0, sizeof(msg));

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	iov.iov_base = data;
	iov.iov_len = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	ATF_REQUIRE((rv = recvmsg(fd, &msg, 0)) > 0);

	ATF_REQUIRE((msg.msg_flags & (MSG_EOR | MSG_CTRUNC)) == MSG_EOR);

	cmsg = CMSG_FIRSTHDR(&msg);
	ATF_REQUIRE(cmsg != NULL);
	ATF_REQUIRE_INTEQ(IPPROTO_TCP, cmsg->cmsg_level);
	ATF_REQUIRE_INTEQ(TLS_GET_RECORD, cmsg->cmsg_type);
	ATF_REQUIRE_INTEQ(CMSG_LEN(sizeof(*tgr)), cmsg->cmsg_len);

	tgr = (struct tls_get_record *)CMSG_DATA(cmsg);
	ATF_REQUIRE_INTEQ(record_type, tgr->tls_type);
	ATF_REQUIRE_INTEQ(en->tls_vmajor, tgr->tls_vmajor);
	/* XXX: Not sure if this is what OpenSSL expects? */
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		ATF_REQUIRE_INTEQ(TLS_MINOR_VER_TWO, tgr->tls_vminor);
	else
		ATF_REQUIRE_INTEQ(en->tls_vminor, tgr->tls_vminor);
	ATF_REQUIRE_INTEQ(htons(rv), tgr->tls_length);

	return (rv);
}
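
/*
 * Receive tests: enable kernel TLS for receive on one end, construct and
 * encrypt TLS records in userspace with the helpers above, write them to
 * the peer socket, and verify that recvmsg() returns the original
 * plaintext.  'padding' requests extra record padding beyond the minimum
 * required by the cipher suite.
 */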
static void
test_ktls_receive_app_data(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len, size_t padding)
{
	struct kevent ev;
	char *plaintext, *received, *outbuf;
	size_t outbuf_cap, outbuf_len, outbuf_sent, received_len, todo, written;
	ssize_t rv;
	int kq, sockets[2];

	plaintext = alloc_buffer(len);
	received = malloc(len);
	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
	    tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);

	ATF_REQUIRE((kq = kqueue()) != -1);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);

	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);

	received_len = 0;
	outbuf_len = 0;
	written = 0;

	while (received_len != len) {
		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);

		switch (ev.filter) {
		case EVFILT_WRITE:
			/*
			 * Compose the next TLS record to send.
			 */
			if (outbuf_len == 0) {
				ATF_REQUIRE(written < len);
				todo = len - written;
				if (todo > TLS_MAX_MSG_SIZE_V10_2 - padding)
					todo = TLS_MAX_MSG_SIZE_V10_2 - padding;
				outbuf_len = encrypt_tls_record(tc, en,
				    TLS_RLTYPE_APP, seqno, plaintext + written,
				    todo, outbuf, outbuf_cap, padding);
				outbuf_sent = 0;
				written += todo;
				seqno++;
			}

			/*
			 * Try to write the remainder of the current
			 * TLS record.
			 */
			rv = write(ev.ident, outbuf + outbuf_sent,
			    outbuf_len - outbuf_sent);
			ATF_REQUIRE_MSG(rv > 0,
			    "failed to write to socket: %s", strerror(errno));
			outbuf_sent += rv;
			if (outbuf_sent == outbuf_len) {
				outbuf_len = 0;
				if (written == len) {
					ev.flags = EV_DISABLE;
					ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
					    NULL) == 0);
				}
			}
			break;

		case EVFILT_READ:
			ATF_REQUIRE((ev.flags & EV_EOF) == 0);

			rv = ktls_receive_tls_record(en, ev.ident,
			    TLS_RLTYPE_APP, received + received_len,
			    len - received_len);
			received_len += rv;
			break;
		}
	}

	ATF_REQUIRE_MSG(written == received_len,
	    "read %zu decrypted bytes, but wrote %zu", received_len, written);

	ATF_REQUIRE(memcmp(plaintext, received, len) == 0);

	free(outbuf);
	free(received);
	free(plaintext);

	close_sockets(sockets);
	ATF_REQUIRE(close(kq) == 0);
}
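
/*
 * Attempt a recvmsg() that is expected to fail, verifying the errno when
 * one is specified.  Used by the malformed-record tests below.
 */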
static void
ktls_receive_tls_error(int fd, int expected_error)
{
	struct msghdr msg;
	struct tls_get_record *tgr;
	char cbuf[CMSG_SPACE(sizeof(*tgr))];
	char buf[64];
	struct iovec iov;

	memset(&msg, 0, sizeof(msg));

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	iov.iov_base = buf;
	iov.iov_len = sizeof(buf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	ATF_REQUIRE(recvmsg(fd, &msg, 0) == -1);
	if (expected_error != 0)
		ATF_REQUIRE_ERRNO(expected_error, true);
}
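
/*
 * Negative receive tests: write deliberately damaged records (flipped
 * ciphertext, IV, MAC, or padding bytes, truncated records, and bogus
 * header fields) into a receive-TLS socket and verify that recvmsg()
 * fails with the expected errno (EBADMSG, EMSGSIZE, or EINVAL).
 */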
static void
test_ktls_receive_corrupted_record(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len, ssize_t offset)
{
	char *plaintext, *outbuf;
	size_t outbuf_cap, outbuf_len;
	ssize_t rv;
	int sockets[2];

	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);

	plaintext = alloc_buffer(len);
	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
	    plaintext, len, outbuf, outbuf_cap, 0);

	/* A negative offset is an offset from the end. */
	if (offset < 0)
		offset += outbuf_len;
	outbuf[offset] ^= 0x01;

	rv = write(sockets[1], outbuf, outbuf_len);
	ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv);

	ktls_receive_tls_error(sockets[0], EBADMSG);

	free(outbuf);
	free(plaintext);

	close_sockets_ignore_errors(sockets);
}

static void
test_ktls_receive_corrupted_iv(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	ATF_REQUIRE(tls_header_len(en) > sizeof(struct tls_record_layer));

	/* Corrupt the first byte of the explicit IV after the header. */
	test_ktls_receive_corrupted_record(tc, en, seqno, len,
	    sizeof(struct tls_record_layer));
}

static void
test_ktls_receive_corrupted_data(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	ATF_REQUIRE(len > 0);

	/* Corrupt the first ciphertext byte after the header. */
	test_ktls_receive_corrupted_record(tc, en, seqno, len,
	    tls_header_len(en));
}

static void
test_ktls_receive_corrupted_mac(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	ssize_t offset;

	/* Corrupt the first byte of the MAC. */
	if (en->cipher_algorithm == CRYPTO_AES_CBC)
		offset = tls_header_len(en) + len;
	else
		offset = -tls_mac_len(en);
	test_ktls_receive_corrupted_record(tc, en, seqno, len, offset);
}

static void
test_ktls_receive_corrupted_padding(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	ATF_REQUIRE_INTEQ(CRYPTO_AES_CBC, en->cipher_algorithm);

	/* Corrupt the last byte of the padding. */
	test_ktls_receive_corrupted_record(tc, en, seqno, len, -1);
}

static void
test_ktls_receive_truncated_record(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	char *plaintext, *outbuf;
	size_t outbuf_cap, outbuf_len;
	ssize_t rv;
	int sockets[2];

	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);

	plaintext = alloc_buffer(len);
	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
	    plaintext, len, outbuf, outbuf_cap, 0);

	rv = write(sockets[1], outbuf, outbuf_len / 2);
	ATF_REQUIRE_INTEQ((ssize_t)(outbuf_len / 2), rv);

	ATF_REQUIRE(shutdown(sockets[1], SHUT_WR) == 0);

	ktls_receive_tls_error(sockets[0], EMSGSIZE);

	free(outbuf);
	free(plaintext);

	close_sockets_ignore_errors(sockets);
}

static void
test_ktls_receive_bad_major(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	struct tls_record_layer *hdr;
	char *plaintext, *outbuf;
	size_t outbuf_cap, outbuf_len;
	ssize_t rv;
	int sockets[2];

	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);

	plaintext = alloc_buffer(len);
	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
	    plaintext, len, outbuf, outbuf_cap, 0);

	hdr = (void *)outbuf;
	hdr->tls_vmajor++;

	rv = write(sockets[1], outbuf, outbuf_len);
	ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv);

	ktls_receive_tls_error(sockets[0], EINVAL);

	free(outbuf);
	free(plaintext);

	close_sockets_ignore_errors(sockets);
}

static void
test_ktls_receive_bad_minor(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	struct tls_record_layer *hdr;
	char *plaintext, *outbuf;
	size_t outbuf_cap, outbuf_len;
	ssize_t rv;
	int sockets[2];

	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);

	plaintext = alloc_buffer(len);
	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
	    plaintext, len, outbuf, outbuf_cap, 0);

	hdr = (void *)outbuf;
	hdr->tls_vminor++;

	rv = write(sockets[1], outbuf, outbuf_len);
	ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv);

	ktls_receive_tls_error(sockets[0], EINVAL);

	free(outbuf);
	free(plaintext);

	close_sockets_ignore_errors(sockets);
}

static void
test_ktls_receive_bad_type(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	struct tls_record_layer *hdr;
	char *plaintext, *outbuf;
	size_t outbuf_cap, outbuf_len;
	ssize_t rv;
	int sockets[2];

	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
	ATF_REQUIRE_INTEQ(TLS_MINOR_VER_THREE, en->tls_vminor);

	plaintext = alloc_buffer(len);
	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	outbuf_len = encrypt_tls_record(tc, en, 0x21 /* Alert */, seqno,
	    plaintext, len, outbuf, outbuf_cap, 0);

	hdr = (void *)outbuf;
	hdr->tls_type = TLS_RLTYPE_APP + 1;

	rv = write(sockets[1], outbuf, outbuf_len);
	ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv);

	ktls_receive_tls_error(sockets[0], EINVAL);

	free(outbuf);
	free(plaintext);

	close_sockets_ignore_errors(sockets);
}

static void
test_ktls_receive_bad_size(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	struct tls_record_layer *hdr;
	char *outbuf;
	size_t outbuf_len;
	ssize_t rv;
	int sockets[2];

	outbuf_len = sizeof(*hdr) + len;
	outbuf = calloc(1, outbuf_len);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	hdr = (void *)outbuf;
	hdr->tls_vmajor = en->tls_vmajor;
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		hdr->tls_vminor = TLS_MINOR_VER_TWO;
	else
		hdr->tls_vminor = en->tls_vminor;
	hdr->tls_type = TLS_RLTYPE_APP;
	hdr->tls_length = htons(len);

	rv = write(sockets[1], outbuf, outbuf_len);
	ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv);

	/*
	 * The other end may notice the error and drop the connection
	 * before this executes resulting in shutdown() failing with
	 * ENOTCONN.  Ignore this error if it occurs.
	 */
	if (shutdown(sockets[1], SHUT_WR) != 0)
		ATF_REQUIRE_ERRNO(ENOTCONN, true);

	ktls_receive_tls_error(sockets[0], EMSGSIZE);

	free(outbuf);

	close_sockets_ignore_errors(sockets);
}
#define	TLS_10_TESTS(M) \
	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO)

#define	TLS_13_TESTS(M) \
	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)

#define	AES_CBC_NONZERO_TESTS(M) \
	M(aes128_cbc_1_1_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE) \
	M(aes256_cbc_1_1_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE) \
	M(aes128_cbc_1_2_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO) \
	M(aes128_cbc_1_2_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha256, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO) \
	M(aes128_cbc_1_2_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha384, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO)

#define	AES_CBC_TESTS(M) \
	TLS_10_TESTS(M) \
	AES_CBC_NONZERO_TESTS(M)

#define	AES_GCM_12_TESTS(M) \
	M(aes128_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(aes256_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_TWO)

#define	AES_GCM_TESTS(M) \
	AES_GCM_12_TESTS(M) \
	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)

#define	CHACHA20_TESTS(M) \
	M(chacha20_poly1305_1_2, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)
tls_enable en; \ 1980 uint64_t seqno; \ 1981 \ 1982 ATF_REQUIRE_KTLS(); \ 1983 seqno = random(); \ 1984 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 1985 seqno, &en); \ 1986 test_ktls_transmit_app_data(tc, &en, seqno, len); \ 1987 free_tls_enable(&en); \ 1988 } 1989 1990 #define ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 1991 auth_alg, minor, name) \ 1992 ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name); 1993 1994 #define GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 1995 auth_alg, minor, name, type, len) \ 1996 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name); \ 1997 ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc) \ 1998 { \ 1999 struct tls_enable en; \ 2000 uint64_t seqno; \ 2001 \ 2002 ATF_REQUIRE_KTLS(); \ 2003 seqno = random(); \ 2004 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2005 seqno, &en); \ 2006 test_ktls_transmit_control(tc, &en, seqno, type, len); \ 2007 free_tls_enable(&en); \ 2008 } 2009 2010 #define ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2011 auth_alg, minor, name) \ 2012 ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name); 2013 2014 #define GEN_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg, \ 2015 key_size, auth_alg, minor) \ 2016 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_empty_fragment); \ 2017 ATF_TC_BODY(ktls_transmit_##cipher_name##_empty_fragment, tc) \ 2018 { \ 2019 struct tls_enable en; \ 2020 uint64_t seqno; \ 2021 \ 2022 ATF_REQUIRE_KTLS(); \ 2023 seqno = random(); \ 2024 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2025 seqno, &en); \ 2026 test_ktls_transmit_empty_fragment(tc, &en, seqno); \ 2027 free_tls_enable(&en); \ 2028 } 2029 2030 #define ADD_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg, \ 2031 key_size, auth_alg, minor) \ 2032 ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_empty_fragment); 2033 2034 #define GEN_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \ 2035 minor) \ 2036 GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2037 auth_alg, minor, short, 64) \ 2038 GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2039 auth_alg, minor, long, 64 * 1024) \ 2040 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2041 auth_alg, minor, control, 0x21 /* Alert */, 32) 2042 2043 #define ADD_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \ 2044 minor) \ 2045 ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2046 auth_alg, minor, short) \ 2047 ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2048 auth_alg, minor, long) \ 2049 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2050 auth_alg, minor, control) 2051 2052 /* 2053 * For each supported cipher suite, run three transmit tests: 2054 * 2055 * - a short test which sends 64 bytes of application data (likely as 2056 * a single TLS record) 2057 * 2058 * - a long test which sends 64KB of application data (split across 2059 * multiple TLS records) 2060 * 2061 * - a control test which sends a single record with a specific 2062 * content type via sendmsg() 2063 */ 2064 AES_CBC_TESTS(GEN_TRANSMIT_TESTS); 2065 AES_GCM_TESTS(GEN_TRANSMIT_TESTS); 2066 CHACHA20_TESTS(GEN_TRANSMIT_TESTS); 2067 2068 #define GEN_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size, \ 2069 auth_alg, minor) \ 2070 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2071 auth_alg, minor, padding_1, 0x21 /* Alert */, 1) \ 2072 GEN_TRANSMIT_CONTROL_TEST(cipher_name, 
cipher_alg, key_size, \ 2073 auth_alg, minor, padding_2, 0x21 /* Alert */, 2) \ 2074 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2075 auth_alg, minor, padding_3, 0x21 /* Alert */, 3) \ 2076 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2077 auth_alg, minor, padding_4, 0x21 /* Alert */, 4) \ 2078 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2079 auth_alg, minor, padding_5, 0x21 /* Alert */, 5) \ 2080 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2081 auth_alg, minor, padding_6, 0x21 /* Alert */, 6) \ 2082 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2083 auth_alg, minor, padding_7, 0x21 /* Alert */, 7) \ 2084 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2085 auth_alg, minor, padding_8, 0x21 /* Alert */, 8) \ 2086 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2087 auth_alg, minor, padding_9, 0x21 /* Alert */, 9) \ 2088 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2089 auth_alg, minor, padding_10, 0x21 /* Alert */, 10) \ 2090 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2091 auth_alg, minor, padding_11, 0x21 /* Alert */, 11) \ 2092 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2093 auth_alg, minor, padding_12, 0x21 /* Alert */, 12) \ 2094 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2095 auth_alg, minor, padding_13, 0x21 /* Alert */, 13) \ 2096 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2097 auth_alg, minor, padding_14, 0x21 /* Alert */, 14) \ 2098 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2099 auth_alg, minor, padding_15, 0x21 /* Alert */, 15) \ 2100 GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2101 auth_alg, minor, padding_16, 0x21 /* Alert */, 16) 2102 2103 #define ADD_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size, \ 2104 auth_alg, minor) \ 2105 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2106 auth_alg, minor, padding_1) \ 2107 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2108 auth_alg, minor, padding_2) \ 2109 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2110 auth_alg, minor, padding_3) \ 2111 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2112 auth_alg, minor, padding_4) \ 2113 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2114 auth_alg, minor, padding_5) \ 2115 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2116 auth_alg, minor, padding_6) \ 2117 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2118 auth_alg, minor, padding_7) \ 2119 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2120 auth_alg, minor, padding_8) \ 2121 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2122 auth_alg, minor, padding_9) \ 2123 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2124 auth_alg, minor, padding_10) \ 2125 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2126 auth_alg, minor, padding_11) \ 2127 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2128 auth_alg, minor, padding_12) \ 2129 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2130 auth_alg, minor, padding_13) \ 2131 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2132 auth_alg, minor, padding_14) \ 2133 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 2134 auth_alg, minor, padding_15) \ 2135 ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \ 
2136 auth_alg, minor, padding_16) 2137 2138 /* 2139 * For AES-CBC MTE cipher suites using padding, add tests of messages 2140 * with each possible padding size. Note that the padding_<N> tests 2141 * do not necessarily test <N> bytes of padding as the padding is a 2142 * function of the cipher suite's MAC length. However, cycling 2143 * through all of the payload sizes from 1 to 16 should exercise all 2144 * of the possible padding lengths for each suite. 2145 */ 2146 AES_CBC_TESTS(GEN_TRANSMIT_PADDING_TESTS); 2147 2148 /* 2149 * Test "empty fragments" which are TLS records with no payload that 2150 * OpenSSL can send for TLS 1.0 connections. 2151 */ 2152 AES_CBC_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST); 2153 AES_GCM_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST); 2154 CHACHA20_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST); 2155 2156 static void 2157 test_ktls_invalid_transmit_cipher_suite(const atf_tc_t *tc, 2158 struct tls_enable *en) 2159 { 2160 int sockets[2]; 2161 2162 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 2163 2164 ATF_REQUIRE_ERRNO(EINVAL, setsockopt(sockets[1], IPPROTO_TCP, 2165 TCP_TXTLS_ENABLE, en, sizeof(*en)) == -1); 2166 2167 close_sockets(sockets); 2168 } 2169 2170 #define GEN_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \ 2171 minor) \ 2172 ATF_TC_WITHOUT_HEAD(ktls_transmit_invalid_##name); \ 2173 ATF_TC_BODY(ktls_transmit_invalid_##name, tc) \ 2174 { \ 2175 struct tls_enable en; \ 2176 uint64_t seqno; \ 2177 \ 2178 ATF_REQUIRE_KTLS(); \ 2179 seqno = random(); \ 2180 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2181 seqno, &en); \ 2182 test_ktls_invalid_transmit_cipher_suite(tc, &en); \ 2183 free_tls_enable(&en); \ 2184 } 2185 2186 #define ADD_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \ 2187 minor) \ 2188 ATF_TP_ADD_TC(tp, ktls_transmit_invalid_##name); 2189 2190 #define INVALID_CIPHER_SUITES(M) \ 2191 M(aes128_cbc_1_0_sha256, CRYPTO_AES_CBC, 128 / 8, \ 2192 CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ZERO) \ 2193 M(aes128_cbc_1_0_sha384, CRYPTO_AES_CBC, 128 / 8, \ 2194 CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ZERO) \ 2195 M(aes128_gcm_1_0, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \ 2196 TLS_MINOR_VER_ZERO) \ 2197 M(chacha20_poly1305_1_0, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \ 2198 TLS_MINOR_VER_ZERO) \ 2199 M(aes128_cbc_1_1_sha256, CRYPTO_AES_CBC, 128 / 8, \ 2200 CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ONE) \ 2201 M(aes128_cbc_1_1_sha384, CRYPTO_AES_CBC, 128 / 8, \ 2202 CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ONE) \ 2203 M(aes128_gcm_1_1, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \ 2204 TLS_MINOR_VER_ONE) \ 2205 M(chacha20_poly1305_1_1, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \ 2206 TLS_MINOR_VER_ONE) \ 2207 M(aes128_cbc_1_3_sha1, CRYPTO_AES_CBC, 128 / 8, \ 2208 CRYPTO_SHA1_HMAC, TLS_MINOR_VER_THREE) \ 2209 M(aes128_cbc_1_3_sha256, CRYPTO_AES_CBC, 128 / 8, \ 2210 CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_THREE) \ 2211 M(aes128_cbc_1_3_sha384, CRYPTO_AES_CBC, 128 / 8, \ 2212 CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_THREE) 2213 2214 /* 2215 * Ensure that invalid cipher suites are rejected for transmit. 
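* The rejected combinations, enumerated in INVALID_CIPHER_SUITES above,
* are SHA-2 HMACs paired with TLS 1.0/1.1, AEAD ciphers paired with
* TLS 1.0/1.1, and AES-CBC suites paired with TLS 1.3.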
2216 */ 2217 INVALID_CIPHER_SUITES(GEN_INVALID_TRANSMIT_TEST); 2218 2219 #define GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2220 auth_alg, minor, name, len, padding) \ 2221 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name); \ 2222 ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc) \ 2223 { \ 2224 struct tls_enable en; \ 2225 uint64_t seqno; \ 2226 \ 2227 ATF_REQUIRE_KTLS(); \ 2228 seqno = random(); \ 2229 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2230 seqno, &en); \ 2231 test_ktls_receive_app_data(tc, &en, seqno, len, padding); \ 2232 free_tls_enable(&en); \ 2233 } 2234 2235 #define ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2236 auth_alg, minor, name) \ 2237 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name); 2238 2239 #define GEN_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2240 auth_alg, minor, len) \ 2241 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_data); \ 2242 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_data, tc) \ 2243 { \ 2244 struct tls_enable en; \ 2245 uint64_t seqno; \ 2246 \ 2247 ATF_REQUIRE_KTLS(); \ 2248 seqno = random(); \ 2249 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2250 seqno, &en); \ 2251 test_ktls_receive_corrupted_data(tc, &en, seqno, len); \ 2252 free_tls_enable(&en); \ 2253 } 2254 2255 #define ADD_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2256 auth_alg, minor) \ 2257 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_data); 2258 2259 #define GEN_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size, \ 2260 auth_alg, minor, len) \ 2261 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_mac); \ 2262 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_mac, tc) \ 2263 { \ 2264 struct tls_enable en; \ 2265 uint64_t seqno; \ 2266 \ 2267 ATF_REQUIRE_KTLS(); \ 2268 seqno = random(); \ 2269 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2270 seqno, &en); \ 2271 test_ktls_receive_corrupted_mac(tc, &en, seqno, len); \ 2272 free_tls_enable(&en); \ 2273 } 2274 2275 #define ADD_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size, \ 2276 auth_alg, minor) \ 2277 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_mac); 2278 2279 #define GEN_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size, \ 2280 auth_alg, minor, len) \ 2281 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_truncated_record); \ 2282 ATF_TC_BODY(ktls_receive_##cipher_name##_truncated_record, tc) \ 2283 { \ 2284 struct tls_enable en; \ 2285 uint64_t seqno; \ 2286 \ 2287 ATF_REQUIRE_KTLS(); \ 2288 seqno = random(); \ 2289 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2290 seqno, &en); \ 2291 test_ktls_receive_truncated_record(tc, &en, seqno, len); \ 2292 free_tls_enable(&en); \ 2293 } 2294 2295 #define ADD_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size, \ 2296 auth_alg, minor) \ 2297 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_truncated_record); 2298 2299 #define GEN_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size, \ 2300 auth_alg, minor, len) \ 2301 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_major); \ 2302 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_major, tc) \ 2303 { \ 2304 struct tls_enable en; \ 2305 uint64_t seqno; \ 2306 \ 2307 ATF_REQUIRE_KTLS(); \ 2308 seqno = random(); \ 2309 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2310 seqno, &en); \ 2311 test_ktls_receive_bad_major(tc, &en, seqno, len); \ 2312 free_tls_enable(&en); \ 2313 } 2314 2315 #define ADD_RECEIVE_BAD_MAJOR_TEST(cipher_name, 
cipher_alg, key_size, \ 2316 auth_alg, minor) \ 2317 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_major); 2318 2319 #define GEN_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size, \ 2320 auth_alg, minor, len) \ 2321 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_minor); \ 2322 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_minor, tc) \ 2323 { \ 2324 struct tls_enable en; \ 2325 uint64_t seqno; \ 2326 \ 2327 ATF_REQUIRE_KTLS(); \ 2328 seqno = random(); \ 2329 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2330 seqno, &en); \ 2331 test_ktls_receive_bad_minor(tc, &en, seqno, len); \ 2332 free_tls_enable(&en); \ 2333 } 2334 2335 #define ADD_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size, \ 2336 auth_alg, minor) \ 2337 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_minor); 2338 2339 #define GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2340 auth_alg, minor, name, len) \ 2341 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name); \ 2342 ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc) \ 2343 { \ 2344 struct tls_enable en; \ 2345 uint64_t seqno; \ 2346 \ 2347 ATF_REQUIRE_KTLS(); \ 2348 seqno = random(); \ 2349 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2350 seqno, &en); \ 2351 test_ktls_receive_bad_size(tc, &en, seqno, (len)); \ 2352 free_tls_enable(&en); \ 2353 } 2354 2355 #define ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2356 auth_alg, minor, name) \ 2357 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name); 2358 2359 #define GEN_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \ 2360 minor) \ 2361 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2362 auth_alg, minor, short, 64, 0) \ 2363 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2364 auth_alg, minor, long, 64 * 1024, 0) \ 2365 GEN_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2366 auth_alg, minor, 64) \ 2367 GEN_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size, \ 2368 auth_alg, minor, 64) \ 2369 GEN_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size, \ 2370 auth_alg, minor, 64) \ 2371 GEN_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size, \ 2372 auth_alg, minor, 64) \ 2373 GEN_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size, \ 2374 auth_alg, minor, 64) \ 2375 GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2376 auth_alg, minor, small_record, \ 2377 tls_minimum_record_payload(&en) - 1) \ 2378 GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2379 auth_alg, minor, oversized_record, \ 2380 TLS_MAX_MSG_SIZE_V10_2 * 2) 2381 2382 #define ADD_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \ 2383 minor) \ 2384 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2385 auth_alg, minor, short) \ 2386 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2387 auth_alg, minor, long) \ 2388 ADD_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2389 auth_alg, minor) \ 2390 ADD_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size, \ 2391 auth_alg, minor) \ 2392 ADD_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size, \ 2393 auth_alg, minor) \ 2394 ADD_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size, \ 2395 auth_alg, minor) \ 2396 ADD_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size, \ 2397 auth_alg, minor) \ 2398 ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2399 auth_alg, minor, small_record) \ 2400 ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, 
key_size, \ 2401 auth_alg, minor, oversized_record) 2402 2403 /* 2404 * For each supported cipher suite, run several receive tests: 2405 * 2406 * - a short test which sends 64 bytes of application data (likely as 2407 * a single TLS record) 2408 * 2409 * - a long test which sends 64KB of application data (split across 2410 * multiple TLS records) 2411 * 2412 * - a test with corrupted payload data in a single TLS record 2413 * 2414 * - a test with a corrupted MAC in a single TLS record 2415 * 2416 * - a test with a truncated TLS record 2417 * 2418 * - tests with invalid TLS major and minor versions 2419 * 2420 * - a test with a record whose payload is one less than the smallest valid 2421 * size 2422 * 2423 * - a test with an oversized TLS record 2424 */ 2425 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_TESTS); 2426 AES_GCM_TESTS(GEN_RECEIVE_TESTS); 2427 CHACHA20_TESTS(GEN_RECEIVE_TESTS); 2428 2429 #define GEN_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg, \ 2430 key_size, auth_alg, minor) \ 2431 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2432 auth_alg, minor, padding_1, 1, 0) \ 2433 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2434 auth_alg, minor, padding_2, 2, 0) \ 2435 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2436 auth_alg, minor, padding_3, 3, 0) \ 2437 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2438 auth_alg, minor, padding_4, 4, 0) \ 2439 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2440 auth_alg, minor, padding_5, 5, 0) \ 2441 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2442 auth_alg, minor, padding_6, 6, 0) \ 2443 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2444 auth_alg, minor, padding_7, 7, 0) \ 2445 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2446 auth_alg, minor, padding_8, 8, 0) \ 2447 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2448 auth_alg, minor, padding_9, 9, 0) \ 2449 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2450 auth_alg, minor, padding_10, 10, 0) \ 2451 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2452 auth_alg, minor, padding_11, 11, 0) \ 2453 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2454 auth_alg, minor, padding_12, 12, 0) \ 2455 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2456 auth_alg, minor, padding_13, 13, 0) \ 2457 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2458 auth_alg, minor, padding_14, 14, 0) \ 2459 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2460 auth_alg, minor, padding_15, 15, 0) \ 2461 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2462 auth_alg, minor, padding_16, 16, 0) \ 2463 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2464 auth_alg, minor, padding_16_extra, 16, 16) \ 2465 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2466 auth_alg, minor, padding_32_extra, 16, 32) 2467 2468 #define ADD_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg, \ 2469 key_size, auth_alg, minor) \ 2470 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2471 auth_alg, minor, padding_1) \ 2472 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2473 auth_alg, minor, padding_2) \ 2474 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2475 auth_alg, minor, padding_3) \ 2476 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2477 auth_alg, minor, padding_4) \ 2478 ADD_RECEIVE_APP_DATA_TEST(cipher_name,
cipher_alg, key_size, \ 2479 auth_alg, minor, padding_5) \ 2480 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2481 auth_alg, minor, padding_6) \ 2482 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2483 auth_alg, minor, padding_7) \ 2484 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2485 auth_alg, minor, padding_8) \ 2486 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2487 auth_alg, minor, padding_9) \ 2488 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2489 auth_alg, minor, padding_10) \ 2490 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2491 auth_alg, minor, padding_11) \ 2492 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2493 auth_alg, minor, padding_12) \ 2494 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2495 auth_alg, minor, padding_13) \ 2496 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2497 auth_alg, minor, padding_14) \ 2498 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2499 auth_alg, minor, padding_15) \ 2500 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2501 auth_alg, minor, padding_16) \ 2502 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2503 auth_alg, minor, padding_16_extra) \ 2504 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2505 auth_alg, minor, padding_32_extra) 2506 2507 #define GEN_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size, \ 2508 auth_alg, minor, len) \ 2509 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_padding); \ 2510 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_padding, tc) \ 2511 { \ 2512 struct tls_enable en; \ 2513 uint64_t seqno; \ 2514 \ 2515 ATF_REQUIRE_KTLS(); \ 2516 seqno = random(); \ 2517 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2518 seqno, &en); \ 2519 test_ktls_receive_corrupted_padding(tc, &en, seqno, len); \ 2520 free_tls_enable(&en); \ 2521 } 2522 2523 #define ADD_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size, \ 2524 auth_alg, minor) \ 2525 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_padding); 2526 2527 #define GEN_RECEIVE_MTE_TESTS(cipher_name, cipher_alg, key_size, \ 2528 auth_alg, minor) \ 2529 GEN_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg, \ 2530 key_size, auth_alg, minor) \ 2531 GEN_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size, \ 2532 auth_alg, minor, 64) \ 2533 GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2534 auth_alg, minor, non_block_size, \ 2535 tls_minimum_record_payload(&en) + 1) 2536 2537 #define ADD_RECEIVE_MTE_TESTS(cipher_name, cipher_alg, key_size, \ 2538 auth_alg, minor) \ 2539 ADD_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg, \ 2540 key_size, auth_alg, minor) \ 2541 ADD_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size, \ 2542 auth_alg, minor) \ 2543 ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2544 auth_alg, minor, non_block_size) 2545 2546 /* 2547 * For AES-CBC MTE cipher suites using padding, add tests of messages 2548 * with each possible padding size. Note that the padding_<N> tests 2549 * do not necessarily test <N> bytes of padding as the padding is a 2550 * function of the cipher suite's MAC length. However, cycling 2551 * through all of the payload sizes from 1 to 16 should exercise all 2552 * of the possible padding lengths for each suite. 2553 * 2554 * Two additional tests check for additional padding with an extra 2555 * 16 or 32 bytes beyond the normal padding. 
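* (The extra 16 or 32 bytes are requested via the trailing 'padding'
* argument of GEN_RECEIVE_APP_DATA_TEST in the padding_16_extra and
* padding_32_extra cases.)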
2556 * 2557 * Another test checks for corrupted padding. 2558 * 2559 * Another test checks for a record whose payload is not a multiple of 2560 * the AES block size. 2561 */ 2562 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_MTE_TESTS); 2563 2564 #define GEN_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size, \ 2565 auth_alg, minor) \ 2566 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_iv); \ 2567 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_iv, tc) \ 2568 { \ 2569 struct tls_enable en; \ 2570 uint64_t seqno; \ 2571 \ 2572 ATF_REQUIRE_KTLS(); \ 2573 seqno = random(); \ 2574 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2575 seqno, &en); \ 2576 test_ktls_receive_corrupted_iv(tc, &en, seqno, 64); \ 2577 free_tls_enable(&en); \ 2578 } 2579 2580 #define ADD_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size, \ 2581 auth_alg, minor) \ 2582 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_iv); 2583 2584 #define GEN_RECEIVE_EXPLICIT_IV_TESTS(cipher_name, cipher_alg, \ 2585 key_size, auth_alg, minor) \ 2586 GEN_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size, \ 2587 auth_alg, minor) \ 2588 GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2589 auth_alg, minor, short_header, \ 2590 sizeof(struct tls_record_layer) + 1) 2591 2592 #define ADD_RECEIVE_EXPLICIT_IV_TESTS(cipher_name, cipher_alg, \ 2593 key_size, auth_alg, minor) \ 2594 ADD_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size, \ 2595 auth_alg, minor) \ 2596 ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size, \ 2597 auth_alg, minor, short_header) 2598 2599 /* 2600 * For cipher suites with an explicit IV, run a receive test where the 2601 * explicit IV has been corrupted. Also run a receive test that sends 2602 * a short record without a complete IV. 2603 */ 2604 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_EXPLICIT_IV_TESTS); 2605 AES_GCM_12_TESTS(GEN_RECEIVE_EXPLICIT_IV_TESTS); 2606 2607 #define GEN_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size, \ 2608 auth_alg, minor, len) \ 2609 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_type); \ 2610 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_type, tc) \ 2611 { \ 2612 struct tls_enable en; \ 2613 uint64_t seqno; \ 2614 \ 2615 ATF_REQUIRE_KTLS(); \ 2616 seqno = random(); \ 2617 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2618 seqno, &en); \ 2619 test_ktls_receive_bad_type(tc, &en, seqno, len); \ 2620 free_tls_enable(&en); \ 2621 } 2622 2623 #define ADD_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size, \ 2624 auth_alg, minor) \ 2625 ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_type); 2626 2627 #define GEN_RECEIVE_TLS13_TESTS(cipher_name, cipher_alg, key_size, \ 2628 auth_alg, minor) \ 2629 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2630 auth_alg, minor, short_padded, 64, 16) \ 2631 GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2632 auth_alg, minor, long_padded, 64 * 1024, 15) \ 2633 GEN_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size, \ 2634 auth_alg, minor, 64) 2635 2636 #define ADD_RECEIVE_TLS13_TESTS(cipher_name, cipher_alg, key_size, \ 2637 auth_alg, minor) \ 2638 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2639 auth_alg, minor, short_padded) \ 2640 ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \ 2641 auth_alg, minor, long_padded) \ 2642 ADD_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size, \ 2643 auth_alg, minor) 2644 2645 /* 2646 * For TLS 1.3 cipher suites, run two additional receive tests which 2647 * add padding to each
record. Also run a test that uses an 2648 * invalid "outer" record type. 2649 */ 2650 TLS_13_TESTS(GEN_RECEIVE_TLS13_TESTS); 2651 2652 static void 2653 test_ktls_invalid_receive_cipher_suite(const atf_tc_t *tc, 2654 struct tls_enable *en) 2655 { 2656 int sockets[2]; 2657 2658 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 2659 2660 ATF_REQUIRE_ERRNO(EINVAL, setsockopt(sockets[1], IPPROTO_TCP, 2661 TCP_RXTLS_ENABLE, en, sizeof(*en)) == -1); 2662 2663 close_sockets(sockets); 2664 } 2665 2666 #define GEN_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg, \ 2667 minor) \ 2668 ATF_TC_WITHOUT_HEAD(ktls_receive_invalid_##name); \ 2669 ATF_TC_BODY(ktls_receive_invalid_##name, tc) \ 2670 { \ 2671 struct tls_enable en; \ 2672 uint64_t seqno; \ 2673 \ 2674 ATF_REQUIRE_KTLS(); \ 2675 seqno = random(); \ 2676 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2677 seqno, &en); \ 2678 test_ktls_invalid_receive_cipher_suite(tc, &en); \ 2679 free_tls_enable(&en); \ 2680 } 2681 2682 #define ADD_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg, \ 2683 minor) \ 2684 ATF_TP_ADD_TC(tp, ktls_receive_invalid_##name); 2685 2686 /* 2687 * Ensure that invalid cipher suites are rejected for receive. 2688 */ 2689 INVALID_CIPHER_SUITES(GEN_INVALID_RECEIVE_TEST); 2690 2691 static void 2692 test_ktls_unsupported_receive_cipher_suite(const atf_tc_t *tc, 2693 struct tls_enable *en) 2694 { 2695 int sockets[2]; 2696 2697 ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets"); 2698 2699 ATF_REQUIRE_ERRNO(EPROTONOSUPPORT, setsockopt(sockets[1], IPPROTO_TCP, 2700 TCP_RXTLS_ENABLE, en, sizeof(*en)) == -1); 2701 2702 close_sockets(sockets); 2703 } 2704 2705 #define GEN_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size, \ 2706 auth_alg, minor) \ 2707 ATF_TC_WITHOUT_HEAD(ktls_receive_unsupported_##name); \ 2708 ATF_TC_BODY(ktls_receive_unsupported_##name, tc) \ 2709 { \ 2710 struct tls_enable en; \ 2711 uint64_t seqno; \ 2712 \ 2713 ATF_REQUIRE_KTLS(); \ 2714 seqno = random(); \ 2715 build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor, \ 2716 seqno, &en); \ 2717 test_ktls_unsupported_receive_cipher_suite(tc, &en); \ 2718 free_tls_enable(&en); \ 2719 } 2720 2721 #define ADD_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size, \ 2722 auth_alg, minor) \ 2723 ATF_TP_ADD_TC(tp, ktls_receive_unsupported_##name); 2724 2725 /* 2726 * Ensure that valid cipher suites not supported for receive are 2727 * rejected. 2728 */ 2729 TLS_10_TESTS(GEN_UNSUPPORTED_RECEIVE_TEST); 2730 2731 /* 2732 * Try to perform an invalid sendto(2) on a TXTLS-enabled socket, to exercise 2733 * KTLS error handling in the socket layer. 
2734 */ 2735 ATF_TC_WITHOUT_HEAD(ktls_sendto_baddst); 2736 ATF_TC_BODY(ktls_sendto_baddst, tc) 2737 { 2738 char buf[32]; 2739 struct sockaddr_in dst; 2740 struct tls_enable en; 2741 ssize_t n; 2742 int s; 2743 2744 ATF_REQUIRE_KTLS(); 2745 2746 s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); 2747 ATF_REQUIRE(s >= 0); 2748 2749 build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, 2750 TLS_MINOR_VER_THREE, (uint64_t)random(), &en); 2751 2752 ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en, 2753 sizeof(en)) == 0); 2754 2755 memset(&dst, 0, sizeof(dst)); 2756 dst.sin_family = AF_INET; 2757 dst.sin_len = sizeof(dst); 2758 dst.sin_addr.s_addr = htonl(INADDR_BROADCAST); 2759 dst.sin_port = htons(12345); 2760 2761 memset(buf, 0, sizeof(buf)); 2762 n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&dst, 2763 sizeof(dst)); 2764 2765 /* Can't transmit to the broadcast address over TCP. */ 2766 ATF_REQUIRE_ERRNO(EACCES, n == -1); 2767 ATF_REQUIRE(close(s) == 0); 2768 } 2769 2770 /* 2771 * Make sure that listen(2) returns an error for KTLS-enabled sockets, and 2772 * verify that an attempt to enable KTLS on a listening socket fails. 2773 */ 2774 ATF_TC_WITHOUT_HEAD(ktls_listening_socket); 2775 ATF_TC_BODY(ktls_listening_socket, tc) 2776 { 2777 struct tls_enable en; 2778 struct sockaddr_in sin; 2779 int s; 2780 2781 ATF_REQUIRE_KTLS(); 2782 2783 s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); 2784 ATF_REQUIRE(s >= 0); 2785 build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, 2786 TLS_MINOR_VER_THREE, (uint64_t)random(), &en); 2787 ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en, 2788 sizeof(en)) == 0); 2789 ATF_REQUIRE_ERRNO(EINVAL, listen(s, 1) == -1); 2790 ATF_REQUIRE(close(s) == 0); 2791 2792 s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); 2793 ATF_REQUIRE(s >= 0); 2794 build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, 2795 TLS_MINOR_VER_THREE, (uint64_t)random(), &en); 2796 ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_RXTLS_ENABLE, &en, 2797 sizeof(en)) == 0); 2798 ATF_REQUIRE_ERRNO(EINVAL, listen(s, 1) == -1); 2799 ATF_REQUIRE(close(s) == 0); 2800 2801 s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); 2802 ATF_REQUIRE(s >= 0); 2803 memset(&sin, 0, sizeof(sin)); 2804 ATF_REQUIRE(bind(s, (struct sockaddr *)&sin, sizeof(sin)) == 0); 2805 ATF_REQUIRE(listen(s, 1) == 0); 2806 build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, 2807 TLS_MINOR_VER_THREE, (uint64_t)random(), &en); 2808 ATF_REQUIRE_ERRNO(ENOTCONN, 2809 setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en, sizeof(en)) != 0); 2810 ATF_REQUIRE_ERRNO(EINVAL, 2811 setsockopt(s, IPPROTO_TCP, TCP_RXTLS_ENABLE, &en, sizeof(en)) != 0); 2812 ATF_REQUIRE(close(s) == 0); 2813 } 2814 2815 ATF_TP_ADD_TCS(tp) 2816 { 2817 /* Transmit tests */ 2818 AES_CBC_TESTS(ADD_TRANSMIT_TESTS); 2819 AES_GCM_TESTS(ADD_TRANSMIT_TESTS); 2820 CHACHA20_TESTS(ADD_TRANSMIT_TESTS); 2821 AES_CBC_TESTS(ADD_TRANSMIT_PADDING_TESTS); 2822 AES_CBC_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST); 2823 AES_GCM_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST); 2824 CHACHA20_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST); 2825 INVALID_CIPHER_SUITES(ADD_INVALID_TRANSMIT_TEST); 2826 2827 /* Receive tests */ 2828 TLS_10_TESTS(ADD_UNSUPPORTED_RECEIVE_TEST); 2829 AES_CBC_NONZERO_TESTS(ADD_RECEIVE_TESTS); 2830 AES_GCM_TESTS(ADD_RECEIVE_TESTS); 2831 CHACHA20_TESTS(ADD_RECEIVE_TESTS); 2832 AES_CBC_NONZERO_TESTS(ADD_RECEIVE_MTE_TESTS); 2833 AES_CBC_NONZERO_TESTS(ADD_RECEIVE_EXPLICIT_IV_TESTS); 2834 AES_GCM_12_TESTS(ADD_RECEIVE_EXPLICIT_IV_TESTS); 2835 
TLS_13_TESTS(ADD_RECEIVE_TLS13_TESTS); 2836 INVALID_CIPHER_SUITES(ADD_INVALID_RECEIVE_TEST); 2837 2838 /* Miscellaneous */ 2839 ATF_TP_ADD_TC(tp, ktls_sendto_baddst); 2840 ATF_TP_ADD_TC(tp, ktls_listening_socket); 2841 2842 return (atf_no_error()); 2843 } 2844