/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Netflix Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/endian.h>
#include <sys/event.h>
#include <sys/ktls.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <crypto/cryptodev.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <netdb.h>
#include <poll.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <atf-c.h>

#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

static void
require_ktls(void)
{
	size_t len;
	bool enable;

	len = sizeof(enable);
	if (sysctlbyname("kern.ipc.tls.enable", &enable, &len, NULL, 0) == -1) {
		if (errno == ENOENT)
			atf_tc_skip("kernel does not support TLS offload");
		atf_libc_error(errno, "Failed to read kern.ipc.tls.enable");
	}

	if (!enable)
		atf_tc_skip("Kernel TLS is disabled");
}

#define ATF_REQUIRE_KTLS() require_ktls()

static void
check_tls_mode(const atf_tc_t *tc, int s, int sockopt)
{
	if (atf_tc_get_config_var_as_bool_wd(tc, "ktls.require_ifnet", false)) {
		socklen_t len;
		int mode;

		len = sizeof(mode);
		if (getsockopt(s, IPPROTO_TCP, sockopt, &mode, &len) == -1)
			atf_libc_error(errno, "Failed to fetch TLS mode");

		if (mode != TCP_TLS_MODE_IFNET)
			atf_tc_skip("connection did not use ifnet TLS");
	}
}

static char
rdigit(void)
{
	/* ASCII printable values between 0x20 and 0x7e */
	return (0x20 + random() % (0x7f - 0x20));
}

static char *
alloc_buffer(size_t len)
{
	char *buf;
	size_t i;

	if (len == 0)
		return (NULL);
	buf = malloc(len);
	for (i = 0; i < len; i++)
		buf[i] = rdigit();
	return (buf);
}

static bool
socketpair_tcp(int sv[2])
{
	struct pollfd pfd;
	struct sockaddr_in sin;
	socklen_t len;
	int as, cs, ls;

	ls = socket(PF_INET, SOCK_STREAM, 0);
	if (ls == -1) {
		warn("socket() for listen");
		return (false);
	}

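	/* Bind to an ephemeral port on the loopback address. */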
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	if (bind(ls, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
		warn("bind");
		close(ls);
		return (false);
	}

	if (listen(ls, 1) == -1) {
		warn("listen");
		close(ls);
		return (false);
	}

	len = sizeof(sin);
	if (getsockname(ls, (struct sockaddr *)&sin, &len) == -1) {
		warn("getsockname");
		close(ls);
		return (false);
	}

	cs = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
	if (cs == -1) {
		warn("socket() for connect");
		close(ls);
		return (false);
	}

	if (connect(cs, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
		if (errno != EINPROGRESS) {
			warn("connect");
			close(ls);
			close(cs);
			return (false);
		}
	}

	as = accept4(ls, NULL, NULL, SOCK_NONBLOCK);
	if (as == -1) {
		warn("accept4");
		close(ls);
		close(cs);
		return (false);
	}

	close(ls);

	pfd.fd = cs;
	pfd.events = POLLOUT;
	pfd.revents = 0;
	ATF_REQUIRE(poll(&pfd, 1, INFTIM) == 1);
	ATF_REQUIRE(pfd.revents == POLLOUT);

	sv[0] = cs;
	sv[1] = as;
	return (true);
}

static bool
echo_socket(const atf_tc_t *tc, int sv[2])
{
	const char *cause, *host, *port;
	struct addrinfo hints, *ai, *tofree;
	int error, flags, s;

	host = atf_tc_get_config_var(tc, "ktls.host");
	port = atf_tc_get_config_var_wd(tc, "ktls.port", "echo");
	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_protocol = IPPROTO_TCP;
	error = getaddrinfo(host, port, &hints, &tofree);
	if (error != 0) {
		warnx("getaddrinfo(%s:%s) failed: %s", host, port,
		    gai_strerror(error));
		return (false);
	}

	cause = NULL;
	for (ai = tofree; ai != NULL; ai = ai->ai_next) {
		s = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
		if (s == -1) {
			cause = "socket";
			error = errno;
			continue;
		}

		if (connect(s, ai->ai_addr, ai->ai_addrlen) == -1) {
			cause = "connect";
			error = errno;
			close(s);
			continue;
		}

		freeaddrinfo(tofree);

		ATF_REQUIRE((flags = fcntl(s, F_GETFL)) != -1);
		flags |= O_NONBLOCK;
		ATF_REQUIRE(fcntl(s, F_SETFL, flags) != -1);

		sv[0] = s;
		sv[1] = s;
		return (true);
	}

	warnc(error, "%s", cause);
	freeaddrinfo(tofree);
	return (false);
}

static bool
open_sockets(const atf_tc_t *tc, int sv[2])
{
	if (atf_tc_has_config_var(tc, "ktls.host"))
		return (echo_socket(tc, sv));
	else
		return (socketpair_tcp(sv));
}

static void
close_sockets(int sv[2])
{
	if (sv[0] != sv[1])
		ATF_REQUIRE(close(sv[1]) == 0);
	ATF_REQUIRE(close(sv[0]) == 0);
}

static void
fd_set_blocking(int fd)
{
	int flags;

	ATF_REQUIRE((flags = fcntl(fd, F_GETFL)) != -1);
	flags &= ~O_NONBLOCK;
	ATF_REQUIRE(fcntl(fd, F_SETFL, flags) != -1);
}

static bool
cbc_decrypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
    const char *input, char *output, size_t size)
{
	EVP_CIPHER_CTX *ctx;
	int outl, total;

	ctx = EVP_CIPHER_CTX_new();
	if (ctx == NULL) {
		warnx("EVP_CIPHER_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (EVP_CipherInit_ex(ctx, cipher, NULL, (const u_char *)key,
	    (const u_char *)iv, 0) != 1) {
		warnx("EVP_CipherInit_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_set_padding(ctx, 0);
	if (EVP_CipherUpdate(ctx, (u_char *)output, &outl,
	    (const u_char *)input, size) != 1) {
		warnx("EVP_CipherUpdate failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total = outl;
	if (EVP_CipherFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
		warnx("EVP_CipherFinal_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total += outl;
	if ((size_t)total != size) {
		warnx("decrypt size mismatch: %zu vs %d", size, total);
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_free(ctx);
	return (true);
}

static bool
verify_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad,
    size_t aad_len, const void *buffer, size_t len, const void *digest)
{
	HMAC_CTX *ctx;
	unsigned char digest2[EVP_MAX_MD_SIZE];
	u_int digest_len;

	ctx = HMAC_CTX_new();
	if (ctx == NULL) {
		warnx("HMAC_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (HMAC_Init_ex(ctx, key, key_len, md, NULL) != 1) {
		warnx("HMAC_Init_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	if (HMAC_Update(ctx, aad, aad_len) != 1) {
		warnx("HMAC_Update (aad) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	if (HMAC_Update(ctx, buffer, len) != 1) {
		warnx("HMAC_Update (payload) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	if (HMAC_Final(ctx, digest2, &digest_len) != 1) {
		warnx("HMAC_Final failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	HMAC_CTX_free(ctx);
	if (memcmp(digest, digest2, digest_len) != 0) {
		warnx("HMAC mismatch");
		return (false);
	}
	return (true);
}

static bool
aead_encrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
    const void *aad, size_t aad_len, const char *input, char *output,
    size_t size, char *tag, size_t tag_len)
{
	EVP_CIPHER_CTX *ctx;
	int outl, total;

	ctx = EVP_CIPHER_CTX_new();
	if (ctx == NULL) {
		warnx("EVP_CIPHER_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (EVP_EncryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
	    (const u_char *)nonce) != 1) {
		warnx("EVP_EncryptInit_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_set_padding(ctx, 0);
	if (aad != NULL) {
		if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
		    aad_len) != 1) {
			warnx("EVP_EncryptUpdate for AAD failed: %s",
			    ERR_error_string(ERR_get_error(), NULL));
			EVP_CIPHER_CTX_free(ctx);
			return (false);
		}
	}
	if (EVP_EncryptUpdate(ctx, (u_char *)output, &outl,
	    (const u_char *)input, size) != 1) {
		warnx("EVP_EncryptUpdate failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total = outl;
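	/* Finalize encryption, then fetch the AEAD tag for the trailer. */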
	if (EVP_EncryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
		warnx("EVP_EncryptFinal_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total += outl;
	if ((size_t)total != size) {
		warnx("encrypt size mismatch: %zu vs %d", size, total);
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tag_len, tag) !=
	    1) {
		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_free(ctx);
	return (true);
}

static bool
aead_decrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
    const void *aad, size_t aad_len, const char *input, char *output,
    size_t size, const char *tag, size_t tag_len)
{
	EVP_CIPHER_CTX *ctx;
	int outl, total;
	bool valid;

	ctx = EVP_CIPHER_CTX_new();
	if (ctx == NULL) {
		warnx("EVP_CIPHER_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (EVP_DecryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
	    (const u_char *)nonce) != 1) {
		warnx("EVP_DecryptInit_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_set_padding(ctx, 0);
	if (aad != NULL) {
		if (EVP_DecryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
		    aad_len) != 1) {
			warnx("EVP_DecryptUpdate for AAD failed: %s",
			    ERR_error_string(ERR_get_error(), NULL));
			EVP_CIPHER_CTX_free(ctx);
			return (false);
		}
	}
	if (EVP_DecryptUpdate(ctx, (u_char *)output, &outl,
	    (const u_char *)input, size) != 1) {
		warnx("EVP_DecryptUpdate failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total = outl;
	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tag_len,
	    __DECONST(char *, tag)) != 1) {
		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	valid = (EVP_DecryptFinal_ex(ctx, (u_char *)output + outl, &outl) == 1);
	total += outl;
	if ((size_t)total != size) {
		warnx("decrypt size mismatch: %zu vs %d", size, total);
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	if (!valid)
		warnx("tag mismatch");
	EVP_CIPHER_CTX_free(ctx);
	return (valid);
}

static void
build_tls_enable(int cipher_alg, size_t cipher_key_len, int auth_alg,
    int minor, uint64_t seqno, struct tls_enable *en)
{
	u_int auth_key_len, iv_len;

	memset(en, 0, sizeof(*en));

	switch (cipher_alg) {
	case CRYPTO_AES_CBC:
		if (minor == TLS_MINOR_VER_ZERO)
			iv_len = AES_BLOCK_LEN;
		else
			iv_len = 0;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (minor == TLS_MINOR_VER_TWO)
			iv_len = TLS_AEAD_GCM_LEN;
		else
			iv_len = TLS_1_3_GCM_IV_LEN;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		iv_len = TLS_CHACHA20_IV_LEN;
		break;
	default:
		iv_len = 0;
		break;
	}
	switch (auth_alg) {
	case CRYPTO_SHA1_HMAC:
		auth_key_len = SHA1_HASH_LEN;
		break;
	case CRYPTO_SHA2_256_HMAC:
		auth_key_len = SHA2_256_HASH_LEN;
		break;
	case CRYPTO_SHA2_384_HMAC:
		auth_key_len = SHA2_384_HASH_LEN;
		break;
	default:
		auth_key_len = 0;
		break;
	}
	en->cipher_key = alloc_buffer(cipher_key_len);
	en->iv = alloc_buffer(iv_len);
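	/* Only MTE (AES-CBC) suites use an auth key; for AEAD it stays NULL. */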
	en->auth_key = alloc_buffer(auth_key_len);
	en->cipher_algorithm = cipher_alg;
	en->cipher_key_len = cipher_key_len;
	en->iv_len = iv_len;
	en->auth_algorithm = auth_alg;
	en->auth_key_len = auth_key_len;
	en->tls_vmajor = TLS_MAJOR_VER_ONE;
	en->tls_vminor = minor;
	be64enc(en->rec_seq, seqno);
}

static void
free_tls_enable(struct tls_enable *en)
{
	free(__DECONST(void *, en->cipher_key));
	free(__DECONST(void *, en->iv));
	free(__DECONST(void *, en->auth_key));
}

static const EVP_CIPHER *
tls_EVP_CIPHER(const struct tls_enable *en)
{
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_CBC:
		switch (en->cipher_key_len) {
		case 128 / 8:
			return (EVP_aes_128_cbc());
		case 256 / 8:
			return (EVP_aes_256_cbc());
		default:
			return (NULL);
		}
		break;
	case CRYPTO_AES_NIST_GCM_16:
		switch (en->cipher_key_len) {
		case 128 / 8:
			return (EVP_aes_128_gcm());
		case 256 / 8:
			return (EVP_aes_256_gcm());
		default:
			return (NULL);
		}
		break;
	case CRYPTO_CHACHA20_POLY1305:
		return (EVP_chacha20_poly1305());
	default:
		return (NULL);
	}
}

static const EVP_MD *
tls_EVP_MD(const struct tls_enable *en)
{
	switch (en->auth_algorithm) {
	case CRYPTO_SHA1_HMAC:
		return (EVP_sha1());
	case CRYPTO_SHA2_256_HMAC:
		return (EVP_sha256());
	case CRYPTO_SHA2_384_HMAC:
		return (EVP_sha384());
	default:
		return (NULL);
	}
}

static size_t
tls_header_len(struct tls_enable *en)
{
	size_t len;

	len = sizeof(struct tls_record_layer);
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_CBC:
		if (en->tls_vminor != TLS_MINOR_VER_ZERO)
			len += AES_BLOCK_LEN;
		return (len);
	case CRYPTO_AES_NIST_GCM_16:
		if (en->tls_vminor == TLS_MINOR_VER_TWO)
			len += sizeof(uint64_t);
		return (len);
	case CRYPTO_CHACHA20_POLY1305:
		return (len);
	default:
		return (0);
	}
}

static size_t
tls_mac_len(struct tls_enable *en)
{
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			return (SHA1_HASH_LEN);
		case CRYPTO_SHA2_256_HMAC:
			return (SHA2_256_HASH_LEN);
		case CRYPTO_SHA2_384_HMAC:
			return (SHA2_384_HASH_LEN);
		default:
			return (0);
		}
	case CRYPTO_AES_NIST_GCM_16:
		return (AES_GMAC_HASH_LEN);
	case CRYPTO_CHACHA20_POLY1305:
		return (POLY1305_HASH_LEN);
	default:
		return (0);
	}
}

/* Includes maximum padding for MTE. */
static size_t
tls_trailer_len(struct tls_enable *en)
{
	size_t len;

	len = tls_mac_len(en);
	if (en->cipher_algorithm == CRYPTO_AES_CBC)
		len += AES_BLOCK_LEN;
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		len++;
	return (len);
}

/* 'len' is the length of the payload application data. */
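/* The fields set here form the HMAC additional data for an MTE record. */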
static void
tls_mte_aad(struct tls_enable *en, size_t len,
    const struct tls_record_layer *hdr, uint64_t seqno, struct tls_mac_data *ad)
{
	ad->seq = htobe64(seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(len);
}

static void
tls_12_aead_aad(struct tls_enable *en, size_t len,
    const struct tls_record_layer *hdr, uint64_t seqno,
    struct tls_aead_data *ad)
{
	ad->seq = htobe64(seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(len);
}

static void
tls_13_aad(struct tls_enable *en, const struct tls_record_layer *hdr,
    uint64_t seqno, struct tls_aead_data_13 *ad)
{
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = hdr->tls_length;
}

static void
tls_12_gcm_nonce(struct tls_enable *en, const struct tls_record_layer *hdr,
    char *nonce)
{
	memcpy(nonce, en->iv, TLS_AEAD_GCM_LEN);
	memcpy(nonce + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
}

static void
tls_13_nonce(struct tls_enable *en, uint64_t seqno, char *nonce)
{
	static_assert(TLS_1_3_GCM_IV_LEN == TLS_CHACHA20_IV_LEN,
	    "TLS 1.3 nonce length mismatch");
	memcpy(nonce, en->iv, TLS_1_3_GCM_IV_LEN);
	*(uint64_t *)(nonce + 4) ^= htobe64(seqno);
}

/*
 * Decrypt a TLS record 'len' bytes long at 'src' and store the result at
 * 'dst'.  If the TLS record header length doesn't match or 'dst' doesn't
 * have sufficient room ('avail'), fail the test.
 */
static size_t
decrypt_tls_aes_cbc_mte(struct tls_enable *en, uint64_t seqno, const void *src,
    size_t len, void *dst, size_t avail, uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	struct tls_mac_data aad;
	const char *iv;
	char *buf;
	size_t hdr_len, mac_len, payload_len;
	int padding;

	hdr = src;
	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
	ATF_REQUIRE(hdr->tls_vminor == en->tls_vminor);

	/* First, decrypt the outer payload into a temporary buffer. */
	payload_len = len - hdr_len;
	buf = malloc(payload_len);
	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
		iv = en->iv;
	else
		iv = (void *)(hdr + 1);
	ATF_REQUIRE(cbc_decrypt(tls_EVP_CIPHER(en), en->cipher_key, iv,
	    (const u_char *)src + hdr_len, buf, payload_len));

	/*
	 * Copy the last encrypted block to use as the IV for the next
	 * record for TLS 1.0.
	 */
	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
		memcpy(__DECONST(uint8_t *, en->iv), (const u_char *)src +
		    (len - AES_BLOCK_LEN), AES_BLOCK_LEN);

	/*
	 * Verify trailing padding and strip.
	 *
	 * The kernel always generates the smallest amount of padding.
	 */
	padding = buf[payload_len - 1] + 1;
	ATF_REQUIRE(padding > 0 && padding <= AES_BLOCK_LEN);
	ATF_REQUIRE(payload_len >= mac_len + padding);
	payload_len -= padding;

	/* Verify HMAC. */
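	/* The MAC covers the MTE AAD followed by the plaintext payload. */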
	payload_len -= mac_len;
	tls_mte_aad(en, payload_len, hdr, seqno, &aad);
	ATF_REQUIRE(verify_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len,
	    &aad, sizeof(aad), buf, payload_len, buf + payload_len));

	ATF_REQUIRE(payload_len <= avail);
	memcpy(dst, buf, payload_len);
	*record_type = hdr->tls_type;
	free(buf);
	return (payload_len);
}

static size_t
decrypt_tls_12_aead(struct tls_enable *en, uint64_t seqno, const void *src,
    size_t len, void *dst, uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	struct tls_aead_data aad;
	char nonce[12];
	size_t hdr_len, mac_len, payload_len;

	hdr = src;

	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	payload_len = len - (hdr_len + mac_len);
	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
	ATF_REQUIRE(hdr->tls_vminor == TLS_MINOR_VER_TWO);

	tls_12_aead_aad(en, payload_len, hdr, seqno, &aad);
	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_12_gcm_nonce(en, hdr, nonce);
	else
		tls_13_nonce(en, seqno, nonce);

	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), (const char *)src + hdr_len, dst, payload_len,
	    (const char *)src + hdr_len + payload_len, mac_len));

	*record_type = hdr->tls_type;
	return (payload_len);
}

static size_t
decrypt_tls_13_aead(struct tls_enable *en, uint64_t seqno, const void *src,
    size_t len, void *dst, uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	struct tls_aead_data_13 aad;
	char nonce[12];
	char *buf;
	size_t hdr_len, mac_len, payload_len;

	hdr = src;

	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	payload_len = len - (hdr_len + mac_len);
	ATF_REQUIRE(payload_len >= 1);
	ATF_REQUIRE(hdr->tls_type == TLS_RLTYPE_APP);
	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
	ATF_REQUIRE(hdr->tls_vminor == TLS_MINOR_VER_TWO);

	tls_13_aad(en, hdr, seqno, &aad);
	tls_13_nonce(en, seqno, nonce);

	/*
	 * Have to use a temporary buffer for the output due to the
	 * record type as the last byte of the trailer.
	 */
	buf = malloc(payload_len);

	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), (const char *)src + hdr_len, buf, payload_len,
	    (const char *)src + hdr_len + payload_len, mac_len));

	/* Trim record type. */
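	/* TLS 1.3 carries the real record type as the last inner plaintext byte. */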
	*record_type = buf[payload_len - 1];
	payload_len--;

	memcpy(dst, buf, payload_len);
	free(buf);

	return (payload_len);
}

static size_t
decrypt_tls_aead(struct tls_enable *en, uint64_t seqno, const void *src,
    size_t len, void *dst, size_t avail, uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	size_t payload_len;

	hdr = src;
	ATF_REQUIRE(ntohs(hdr->tls_length) + sizeof(*hdr) == len);

	payload_len = len - (tls_header_len(en) + tls_trailer_len(en));
	ATF_REQUIRE(payload_len <= avail);

	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
		ATF_REQUIRE(decrypt_tls_12_aead(en, seqno, src, len, dst,
		    record_type) == payload_len);
	} else {
		ATF_REQUIRE(decrypt_tls_13_aead(en, seqno, src, len, dst,
		    record_type) == payload_len);
	}

	return (payload_len);
}

static size_t
decrypt_tls_record(struct tls_enable *en, uint64_t seqno, const void *src,
    size_t len, void *dst, size_t avail, uint8_t *record_type)
{
	if (en->cipher_algorithm == CRYPTO_AES_CBC)
		return (decrypt_tls_aes_cbc_mte(en, seqno, src, len, dst, avail,
		    record_type));
	else
		return (decrypt_tls_aead(en, seqno, src, len, dst, avail,
		    record_type));
}

/*
 * Encrypt a TLS record of type 'record_type' with payload 'len' bytes
 * long at 'src' and store the result at 'dst'.  If 'dst' doesn't have
 * sufficient room ('avail'), fail the test.
 */
static size_t
encrypt_tls_12_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
    const void *src, size_t len, void *dst)
{
	struct tls_record_layer *hdr;
	struct tls_aead_data aad;
	char nonce[12];
	size_t hdr_len, mac_len, record_len;

	hdr = dst;

	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	record_len = hdr_len + len + mac_len;

	hdr->tls_type = record_type;
	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
	hdr->tls_vminor = TLS_MINOR_VER_TWO;
	hdr->tls_length = htons(record_len - sizeof(*hdr));
	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		memcpy(hdr + 1, &seqno, sizeof(seqno));

	tls_12_aead_aad(en, len, hdr, seqno, &aad);
	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_12_gcm_nonce(en, hdr, nonce);
	else
		tls_13_nonce(en, seqno, nonce);

	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), src, (char *)dst + hdr_len, len,
	    (char *)dst + hdr_len + len, mac_len));

	return (record_len);
}

static size_t
encrypt_tls_13_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
    const void *src, size_t len, void *dst, size_t padding)
{
	struct tls_record_layer *hdr;
	struct tls_aead_data_13 aad;
	char nonce[12];
	char *buf;
	size_t hdr_len, mac_len, record_len;

	hdr = dst;

	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	record_len = hdr_len + len + 1 + padding + mac_len;

	hdr->tls_type = TLS_RLTYPE_APP;
	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
	hdr->tls_vminor = TLS_MINOR_VER_TWO;
	hdr->tls_length = htons(record_len - sizeof(*hdr));

	tls_13_aad(en, hdr, seqno, &aad);
	tls_13_nonce(en, seqno, nonce);

	/*
	 * Have to use a temporary buffer for the input so that the record
	 * type can be appended.
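	 * Requested padding bytes are zero-filled after the record type.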
	 */
	buf = malloc(len + 1 + padding);
	memcpy(buf, src, len);
	buf[len] = record_type;
	memset(buf + len + 1, 0, padding);

	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), buf, (char *)dst + hdr_len, len + 1 + padding,
	    (char *)dst + hdr_len + len + 1 + padding, mac_len));

	free(buf);

	return (record_len);
}

static size_t
encrypt_tls_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
    const void *src, size_t len, void *dst, size_t avail, size_t padding)
{
	size_t record_len;

	record_len = tls_header_len(en) + len + padding + tls_trailer_len(en);
	ATF_REQUIRE(record_len <= avail);

	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
		ATF_REQUIRE(padding == 0);
		ATF_REQUIRE(encrypt_tls_12_aead(en, record_type, seqno, src,
		    len, dst) == record_len);
	} else
		ATF_REQUIRE(encrypt_tls_13_aead(en, record_type, seqno, src,
		    len, dst, padding) == record_len);

	return (record_len);
}

static size_t
encrypt_tls_record(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
    const void *src, size_t len, void *dst, size_t avail, size_t padding)
{
	return (encrypt_tls_aead(en, record_type, seqno, src, len, dst, avail,
	    padding));
}

static void
test_ktls_transmit_app_data(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	struct kevent ev;
	struct tls_record_layer *hdr;
	char *plaintext, *decrypted, *outbuf;
	size_t decrypted_len, outbuf_len, outbuf_cap, record_len, written;
	ssize_t rv;
	int kq, sockets[2];
	uint8_t record_type;

	plaintext = alloc_buffer(len);
	decrypted = malloc(len);
	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
	    tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE((kq = kqueue()) != -1);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);

	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);

	decrypted_len = 0;
	outbuf_len = 0;
	written = 0;

	while (decrypted_len != len) {
		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);

		switch (ev.filter) {
		case EVFILT_WRITE:
			/* Try to write any remaining data. */
			rv = write(ev.ident, plaintext + written,
			    len - written);
			ATF_REQUIRE_MSG(rv > 0,
			    "failed to write to socket");
			written += rv;
			if (written == len) {
				ev.flags = EV_DISABLE;
				ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
				    NULL) == 0);
			}
			break;

		case EVFILT_READ:
			ATF_REQUIRE((ev.flags & EV_EOF) == 0);

			/*
			 * Try to read data for the next TLS record
			 * into outbuf.  Start by reading the header
			 * to determine how much additional data to
			 * read.
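			 * A short header read is resumed on the next event.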
			 */
			if (outbuf_len < sizeof(struct tls_record_layer)) {
				rv = read(ev.ident, outbuf + outbuf_len,
				    sizeof(struct tls_record_layer) -
				    outbuf_len);
				ATF_REQUIRE_MSG(rv > 0,
				    "failed to read from socket");
				outbuf_len += rv;
			}

			if (outbuf_len < sizeof(struct tls_record_layer))
				break;

			record_len = sizeof(struct tls_record_layer) +
			    ntohs(hdr->tls_length);
			ATF_REQUIRE(record_len <= outbuf_cap);
			ATF_REQUIRE(record_len > outbuf_len);
			rv = read(ev.ident, outbuf + outbuf_len,
			    record_len - outbuf_len);
			if (rv == -1 && errno == EAGAIN)
				break;
			ATF_REQUIRE_MSG(rv > 0, "failed to read from socket");

			outbuf_len += rv;
			if (outbuf_len == record_len) {
				decrypted_len += decrypt_tls_record(en, seqno,
				    outbuf, outbuf_len,
				    decrypted + decrypted_len,
				    len - decrypted_len, &record_type);
				ATF_REQUIRE(record_type == TLS_RLTYPE_APP);

				seqno++;
				outbuf_len = 0;
			}
			break;
		}
	}

	ATF_REQUIRE_MSG(written == decrypted_len,
	    "read %zu decrypted bytes, but wrote %zu", decrypted_len, written);

	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);

	free(outbuf);
	free(decrypted);
	free(plaintext);

	close_sockets(sockets);
	ATF_REQUIRE(close(kq) == 0);
}

static void
ktls_send_control_message(int fd, uint8_t type, void *data, size_t len)
{
	struct msghdr msg;
	struct cmsghdr *cmsg;
	char cbuf[CMSG_SPACE(sizeof(type))];
	struct iovec iov;

	memset(&msg, 0, sizeof(msg));

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_TCP;
	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
	cmsg->cmsg_len = CMSG_LEN(sizeof(type));
	*(uint8_t *)CMSG_DATA(cmsg) = type;

	iov.iov_base = data;
	iov.iov_len = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	ATF_REQUIRE(sendmsg(fd, &msg, 0) == (ssize_t)len);
}

static void
test_ktls_transmit_control(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, uint8_t type, size_t len)
{
	struct tls_record_layer *hdr;
	char *plaintext, *decrypted, *outbuf;
	size_t outbuf_cap, payload_len, record_len;
	ssize_t rv;
	int sockets[2];
	uint8_t record_type;

	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);

	plaintext = alloc_buffer(len);
	decrypted = malloc(len);
	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	ktls_send_control_message(sockets[1], type, plaintext, len);

	/*
	 * First read the header to determine how much additional data
	 * to read.
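	 * Both sockets are blocking, so these reads complete in full.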
	 */
	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
	payload_len = ntohs(hdr->tls_length);
	record_len = payload_len + sizeof(struct tls_record_layer);
	ATF_REQUIRE(record_len <= outbuf_cap);
	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
	    payload_len);
	ATF_REQUIRE(rv == (ssize_t)payload_len);

	rv = decrypt_tls_record(en, seqno, outbuf, record_len, decrypted, len,
	    &record_type);

	ATF_REQUIRE_MSG((ssize_t)len == rv,
	    "read %zd decrypted bytes, but wrote %zu", rv, len);
	ATF_REQUIRE(record_type == type);

	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);

	free(outbuf);
	free(decrypted);
	free(plaintext);

	close_sockets(sockets);
}

static void
test_ktls_transmit_empty_fragment(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno)
{
	struct tls_record_layer *hdr;
	char *outbuf;
	size_t outbuf_cap, payload_len, record_len;
	ssize_t rv;
	int sockets[2];
	uint8_t record_type;

	outbuf_cap = tls_header_len(en) + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	/*
	 * A write of zero bytes should send an empty fragment only for
	 * TLS 1.0, otherwise an error should be raised.
	 */
	rv = write(sockets[1], NULL, 0);
	if (rv == 0) {
		ATF_REQUIRE(en->cipher_algorithm == CRYPTO_AES_CBC);
		ATF_REQUIRE(en->tls_vminor == TLS_MINOR_VER_ZERO);
	} else {
		ATF_REQUIRE(rv == -1);
		ATF_REQUIRE(errno == EINVAL);
		goto out;
	}

	/*
	 * First read the header to determine how much additional data
	 * to read.
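	 * The empty fragment's payload is just the MAC and CBC padding.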
	 */
	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
	payload_len = ntohs(hdr->tls_length);
	record_len = payload_len + sizeof(struct tls_record_layer);
	ATF_REQUIRE(record_len <= outbuf_cap);
	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
	    payload_len);
	ATF_REQUIRE(rv == (ssize_t)payload_len);

	rv = decrypt_tls_record(en, seqno, outbuf, record_len, NULL, 0,
	    &record_type);

	ATF_REQUIRE_MSG(rv == 0,
	    "read %zd decrypted bytes for an empty fragment", rv);
	ATF_REQUIRE(record_type == TLS_RLTYPE_APP);

out:
	free(outbuf);

	close_sockets(sockets);
}

static size_t
ktls_receive_tls_record(struct tls_enable *en, int fd, uint8_t record_type,
    void *data, size_t len)
{
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct tls_get_record *tgr;
	char cbuf[CMSG_SPACE(sizeof(*tgr))];
	struct iovec iov;
	ssize_t rv;

	memset(&msg, 0, sizeof(msg));

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	iov.iov_base = data;
	iov.iov_len = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	ATF_REQUIRE((rv = recvmsg(fd, &msg, 0)) > 0);

	ATF_REQUIRE((msg.msg_flags & (MSG_EOR | MSG_CTRUNC)) == MSG_EOR);

	cmsg = CMSG_FIRSTHDR(&msg);
	ATF_REQUIRE(cmsg != NULL);
	ATF_REQUIRE(cmsg->cmsg_level == IPPROTO_TCP);
	ATF_REQUIRE(cmsg->cmsg_type == TLS_GET_RECORD);
	ATF_REQUIRE(cmsg->cmsg_len == CMSG_LEN(sizeof(*tgr)));

	tgr = (struct tls_get_record *)CMSG_DATA(cmsg);
	ATF_REQUIRE(tgr->tls_type == record_type);
	ATF_REQUIRE(tgr->tls_vmajor == en->tls_vmajor);
	/* XXX: Not sure if this is what OpenSSL expects? */
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		ATF_REQUIRE(tgr->tls_vminor == TLS_MINOR_VER_TWO);
	else
		ATF_REQUIRE(tgr->tls_vminor == en->tls_vminor);
	ATF_REQUIRE(tgr->tls_length == htons(rv));

	return (rv);
}

static void
test_ktls_receive_app_data(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len, size_t padding)
{
	struct kevent ev;
	char *plaintext, *received, *outbuf;
	size_t outbuf_cap, outbuf_len, outbuf_sent, received_len, todo, written;
	ssize_t rv;
	int kq, sockets[2];

	plaintext = alloc_buffer(len);
	received = malloc(len);
	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
	    tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);

	ATF_REQUIRE((kq = kqueue()) != -1);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);

	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);

	received_len = 0;
	outbuf_len = 0;
	written = 0;

	while (received_len != len) {
		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);

		switch (ev.filter) {
		case EVFILT_WRITE:
			/*
			 * Compose the next TLS record to send.
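			 * Payload plus padding is capped at TLS_MAX_MSG_SIZE_V10_2.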
			 */
			if (outbuf_len == 0) {
				ATF_REQUIRE(written < len);
				todo = len - written;
				if (todo > TLS_MAX_MSG_SIZE_V10_2 - padding)
					todo = TLS_MAX_MSG_SIZE_V10_2 - padding;
				outbuf_len = encrypt_tls_record(en,
				    TLS_RLTYPE_APP, seqno, plaintext + written,
				    todo, outbuf, outbuf_cap, padding);
				outbuf_sent = 0;
				written += todo;
				seqno++;
			}

			/*
			 * Try to write the remainder of the current
			 * TLS record.
			 */
			rv = write(ev.ident, outbuf + outbuf_sent,
			    outbuf_len - outbuf_sent);
			ATF_REQUIRE_MSG(rv > 0,
			    "failed to write to socket");
			outbuf_sent += rv;
			if (outbuf_sent == outbuf_len) {
				outbuf_len = 0;
				if (written == len) {
					ev.flags = EV_DISABLE;
					ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
					    NULL) == 0);
				}
			}
			break;

		case EVFILT_READ:
			ATF_REQUIRE((ev.flags & EV_EOF) == 0);

			rv = ktls_receive_tls_record(en, ev.ident,
			    TLS_RLTYPE_APP, received + received_len,
			    len - received_len);
			received_len += rv;
			break;
		}
	}

	ATF_REQUIRE_MSG(written == received_len,
	    "read %zu decrypted bytes, but wrote %zu", received_len, written);

	ATF_REQUIRE(memcmp(plaintext, received, len) == 0);

	free(outbuf);
	free(received);
	free(plaintext);

	close_sockets(sockets);
	ATF_REQUIRE(close(kq) == 0);
}

#define TLS_10_TESTS(M) \
	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC) \
	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC)

#define TLS_13_TESTS(M) \
	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)

#define AES_CBC_TESTS(M) \
	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes128_cbc_1_1_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE) \
	M(aes256_cbc_1_1_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE) \
	M(aes128_cbc_1_2_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO) \
	M(aes128_cbc_1_2_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha256, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO) \
	M(aes128_cbc_1_2_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha384, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO)

#define AES_GCM_TESTS(M) \
	M(aes128_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(aes256_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)

#define CHACHA20_TESTS(M) \
	M(chacha20_poly1305_1_2, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)

#define GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name, len) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name); \
ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_transmit_app_data(tc, &en, seqno, len); \
	free_tls_enable(&en); \
}

#define ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name) \
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);

#define GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name, type, len) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name); \
ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_transmit_control(tc, &en, seqno, type, len); \
	free_tls_enable(&en); \
}

#define ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name) \
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);

#define GEN_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg, \
	    key_size, auth_alg, minor) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_empty_fragment); \
ATF_TC_BODY(ktls_transmit_##cipher_name##_empty_fragment, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_transmit_empty_fragment(tc, &en, seqno); \
	free_tls_enable(&en); \
}

#define ADD_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg, \
	    key_size, auth_alg, minor) \
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_empty_fragment);

#define GEN_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short, 64) \
	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long, 64 * 1024) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, control, 0x21 /* Alert */, 32)

#define ADD_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short) \
	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, control)

/*
 * For each supported cipher suite, run three transmit tests:
 *
 * - a short test which sends 64 bytes of application data (likely as
 *   a single TLS record)
 *
 * - a long test which sends 64KB of application data (split across
 *   multiple TLS records)
 *
 * - a control test which sends a single record with a specific
 *   content type via sendmsg()
 */
AES_CBC_TESTS(GEN_TRANSMIT_TESTS);
AES_GCM_TESTS(GEN_TRANSMIT_TESTS);
CHACHA20_TESTS(GEN_TRANSMIT_TESTS);

#define GEN_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_1, 0x21 /* Alert */, 1) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_2, 0x21 /* Alert */, 2) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_3, 0x21 /* Alert */, 3) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_4, 0x21 /* Alert */, 4) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_5, 0x21 /* Alert */, 5) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_6, 0x21 /* Alert */, 6) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_7, 0x21 /* Alert */, 7) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_8, 0x21 /* Alert */, 8) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_9, 0x21 /* Alert */, 9) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_10, 0x21 /* Alert */, 10) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_11, 0x21 /* Alert */, 11) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_12, 0x21 /* Alert */, 12) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_13, 0x21 /* Alert */, 13) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_14, 0x21 /* Alert */, 14) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_15, 0x21 /* Alert */, 15) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_16, 0x21 /* Alert */, 16)

#define ADD_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_1) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_2) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_3) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_4) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_5) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_6) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_7) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_8) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_9) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_10) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_11) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_12) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_13) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_14) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_15) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_16)

/*
 * For AES-CBC MTE cipher suites using padding, add tests of messages
 * with each possible padding size.  Note that the padding_<N> tests
 * do not necessarily test <N> bytes of padding as the padding is a
 * function of the cipher suite's MAC length.  However, cycling
 * through all of the payload sizes from 1 to 16 should exercise all
 * of the possible padding lengths for each suite.
 */
AES_CBC_TESTS(GEN_TRANSMIT_PADDING_TESTS);

/*
 * Test "empty fragments" which are TLS records with no payload that
 * OpenSSL can send for TLS 1.0 connections.
 */
AES_CBC_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
AES_GCM_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
CHACHA20_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);

static void
test_ktls_invalid_transmit_cipher_suite(const atf_tc_t *tc,
    struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == -1);
	ATF_REQUIRE(errno == EINVAL);

	close_sockets(sockets);
}

#define GEN_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_invalid_##name); \
ATF_TC_BODY(ktls_transmit_invalid_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_invalid_transmit_cipher_suite(tc, &en); \
	free_tls_enable(&en); \
}

#define ADD_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ATF_TP_ADD_TC(tp, ktls_transmit_invalid_##name);

#define INVALID_CIPHER_SUITES(M) \
	M(aes128_cbc_1_0_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes128_cbc_1_0_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes128_gcm_1_0, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_ZERO) \
	M(chacha20_poly1305_1_0, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_ZERO) \
	M(aes128_cbc_1_1_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ONE) \
	M(aes128_cbc_1_1_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ONE) \
	M(aes128_gcm_1_1, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_ONE) \
	M(chacha20_poly1305_1_1, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_ONE) \
	M(aes128_cbc_1_3_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_THREE) \
	M(aes128_cbc_1_3_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_THREE) \
	M(aes128_cbc_1_3_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_THREE)

/*
 * Ensure that invalid cipher suites are rejected for transmit.
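 * The TCP_TXTLS_ENABLE setsockopt() should fail with EINVAL.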
 */
INVALID_CIPHER_SUITES(GEN_INVALID_TRANSMIT_TEST);

#define GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name, len, padding) \
ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name); \
ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_receive_app_data(tc, &en, seqno, len, padding); \
	free_tls_enable(&en); \
}

#define ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name) \
	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name);

#define GEN_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short, 64, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long, 64 * 1024, 0)

#define ADD_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long)

/*
 * For each supported cipher suite, run two receive tests:
 *
 * - a short test which sends 64 bytes of application data (likely as
 *   a single TLS record)
 *
 * - a long test which sends 64KB of application data (split across
 *   multiple TLS records)
 *
 * Note that receive is currently only supported for TLS 1.2 AEAD
 * cipher suites.
 */
AES_GCM_TESTS(GEN_RECEIVE_TESTS);
CHACHA20_TESTS(GEN_RECEIVE_TESTS);

#define GEN_PADDING_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short_padded, 64, 16) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long_padded, 64 * 1024, 15)

#define ADD_PADDING_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short_padded) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long_padded)

/*
 * For TLS 1.3 cipher suites, run two additional receive tests which
 * add padding to each record.
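 * The padding exercises the TLS 1.3 inner plaintext handling on receive.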
 */
TLS_13_TESTS(GEN_PADDING_RECEIVE_TESTS);

static void
test_ktls_invalid_receive_cipher_suite(const atf_tc_t *tc,
    struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == -1);
	ATF_REQUIRE(errno == EINVAL);

	close_sockets(sockets);
}

#define GEN_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
ATF_TC_WITHOUT_HEAD(ktls_receive_invalid_##name); \
ATF_TC_BODY(ktls_receive_invalid_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_invalid_receive_cipher_suite(tc, &en); \
	free_tls_enable(&en); \
}

#define ADD_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ATF_TP_ADD_TC(tp, ktls_receive_invalid_##name);

/*
 * Ensure that invalid cipher suites are rejected for receive.
 */
INVALID_CIPHER_SUITES(GEN_INVALID_RECEIVE_TEST);

static void
test_ktls_unsupported_receive_cipher_suite(const atf_tc_t *tc,
    struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == -1);
	ATF_REQUIRE(errno == EPROTONOSUPPORT);

	close_sockets(sockets);
}

#define GEN_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size, \
	    auth_alg, minor) \
ATF_TC_WITHOUT_HEAD(ktls_receive_unsupported_##name); \
ATF_TC_BODY(ktls_receive_unsupported_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_unsupported_receive_cipher_suite(tc, &en); \
	free_tls_enable(&en); \
}

#define ADD_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size, \
	    auth_alg, minor) \
	ATF_TP_ADD_TC(tp, ktls_receive_unsupported_##name);

/*
 * Ensure that valid cipher suites not supported for receive are
 * rejected.
 */
AES_CBC_TESTS(GEN_UNSUPPORTED_RECEIVE_TEST);

/*
 * Try to perform an invalid sendto(2) on a TXTLS-enabled socket, to exercise
 * KTLS error handling in the socket layer.
 */
ATF_TC_WITHOUT_HEAD(ktls_sendto_baddst);
ATF_TC_BODY(ktls_sendto_baddst, tc)
{
	char buf[32];
	struct sockaddr_in dst;
	struct tls_enable en;
	ssize_t n;
	int s;

	ATF_REQUIRE_KTLS();

	s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	ATF_REQUIRE(s >= 0);

	build_tls_enable(CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,
	    TLS_MINOR_VER_THREE, (uint64_t)random(), &en);

	ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en,
	    sizeof(en)) == 0);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(dst);
	dst.sin_addr.s_addr = htonl(INADDR_BROADCAST);
	dst.sin_port = htons(12345);

	memset(buf, 0, sizeof(buf));
	n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&dst,
	    sizeof(dst));

	/* Can't transmit to the broadcast address over TCP. */
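	/* The socket should still close cleanly after the failed send. */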
	ATF_REQUIRE_ERRNO(EACCES, n == -1);
	ATF_REQUIRE(close(s) == 0);
}

ATF_TP_ADD_TCS(tp)
{
	/* Transmit tests */
	AES_CBC_TESTS(ADD_TRANSMIT_TESTS);
	AES_GCM_TESTS(ADD_TRANSMIT_TESTS);
	CHACHA20_TESTS(ADD_TRANSMIT_TESTS);
	AES_CBC_TESTS(ADD_TRANSMIT_PADDING_TESTS);
	AES_CBC_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
	AES_GCM_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
	CHACHA20_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
	INVALID_CIPHER_SUITES(ADD_INVALID_TRANSMIT_TEST);

	/* Receive tests */
	AES_CBC_TESTS(ADD_UNSUPPORTED_RECEIVE_TEST);
	AES_GCM_TESTS(ADD_RECEIVE_TESTS);
	CHACHA20_TESTS(ADD_RECEIVE_TESTS);
	TLS_13_TESTS(ADD_PADDING_RECEIVE_TESTS);
	INVALID_CIPHER_SUITES(ADD_INVALID_RECEIVE_TEST);

	/* Miscellaneous */
	ATF_TP_ADD_TC(tp, ktls_sendto_baddst);

	return (atf_no_error());
}