xref: /freebsd/tests/sys/kern/ktls_test.c (revision 883d1742d3d74635afc3ae2d8208974ac51e3dd9)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Netflix Inc.
5  * Written by: John Baldwin <jhb@FreeBSD.org>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/endian.h>
31 #include <sys/event.h>
32 #include <sys/ktls.h>
33 #include <sys/socket.h>
34 #include <sys/sysctl.h>
35 #include <netinet/in.h>
36 #include <netinet/tcp.h>
37 #include <crypto/cryptodev.h>
38 #include <assert.h>
39 #include <err.h>
#include <errno.h>
40 #include <fcntl.h>
41 #include <libutil.h>
42 #include <netdb.h>
43 #include <poll.h>
#include <stdarg.h>
44 #include <stdbool.h>
#include <stdio.h>
45 #include <stdlib.h>
#include <string.h>
#include <unistd.h>
46 #include <atf-c.h>
47 
48 #include <openssl/err.h>
49 #include <openssl/evp.h>
50 #include <openssl/hmac.h>
51 
52 static void
53 require_ktls(void)
54 {
55 	size_t len;
56 	bool enable;
57 
58 	len = sizeof(enable);
59 	if (sysctlbyname("kern.ipc.tls.enable", &enable, &len, NULL, 0) == -1) {
60 		if (errno == ENOENT)
61 			atf_tc_skip("kernel does not support TLS offload");
62 		atf_tc_fail("failed to read kern.ipc.tls.enable: %s",
		    strerror(errno));
63 	}
64 
65 	if (!enable)
66 		atf_tc_skip("Kernel TLS is disabled");
67 }
68 
69 #define	ATF_REQUIRE_KTLS()	require_ktls()
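
/*
 * Note: these tests skip unless kernel TLS support is present and enabled;
 * enabling it typically just means setting the sysctl checked above, e.g.
 * `sysctl kern.ipc.tls.enable=1` on a kernel built with KERN_TLS.
 */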
70 
71 static void
72 check_tls_mode(const atf_tc_t *tc, int s, int sockopt)
73 {
74 	if (atf_tc_get_config_var_as_bool_wd(tc, "ktls.require_ifnet", false)) {
75 		socklen_t len;
76 		int mode;
77 
78 		len = sizeof(mode);
79 		if (getsockopt(s, IPPROTO_TCP, sockopt, &mode, &len) == -1)
80 			atf_tc_fail("failed to fetch TLS mode: %s",
			    strerror(errno));
81 
82 		if (mode != TCP_TLS_MODE_IFNET)
83 			atf_tc_skip("connection did not use ifnet TLS");
84 	}
85 
86 	if (atf_tc_get_config_var_as_bool_wd(tc, "ktls.require_toe", false)) {
87 		socklen_t len;
88 		int mode;
89 
90 		len = sizeof(mode);
91 		if (getsockopt(s, IPPROTO_TCP, sockopt, &mode, &len) == -1)
92 			atf_tc_fail("failed to fetch TLS mode: %s",
			    strerror(errno));
93 
94 		if (mode != TCP_TLS_MODE_TOE)
95 			atf_tc_skip("connection did not use TOE TLS");
96 	}
97 }
98 
99 static void __printflike(2, 3)
100 debug(const atf_tc_t *tc, const char *fmt, ...)
101 {
102 	if (!atf_tc_get_config_var_as_bool_wd(tc, "ktls.debug", false))
103 		return;
104 
105 	va_list ap;
106 	va_start(ap, fmt);
107 	vprintf(fmt, ap);
108 	va_end(ap);
109 }
110 
111 static void
112 debug_hexdump(const atf_tc_t *tc, const void *buf, int length,
113     const char *label)
114 {
115 	if (!atf_tc_get_config_var_as_bool_wd(tc, "ktls.debug", false))
116 		return;
117 
118 	if (label != NULL)
119 		printf("%s:\n", label);
120 	hexdump(buf, length, NULL, 0);
121 }
122 
123 static char
124 rdigit(void)
125 {
126 	/* ASCII printable values between 0x20 and 0x7e */
127 	return (0x20 + random() % (0x7f - 0x20));
128 }
129 
130 static char *
131 alloc_buffer(size_t len)
132 {
133 	char *buf;
134 	size_t i;
135 
136 	if (len == 0)
137 		return (NULL);
138 	buf = malloc(len);
139 	for (i = 0; i < len; i++)
140 		buf[i] = rdigit();
141 	return (buf);
142 }
143 
144 static bool
145 socketpair_tcp(int sv[2])
146 {
147 	struct pollfd pfd;
148 	struct sockaddr_in sin;
149 	socklen_t len;
150 	int as, cs, ls;
151 
152 	ls = socket(PF_INET, SOCK_STREAM, 0);
153 	if (ls == -1) {
154 		warn("socket() for listen");
155 		return (false);
156 	}
157 
158 	memset(&sin, 0, sizeof(sin));
159 	sin.sin_len = sizeof(sin);
160 	sin.sin_family = AF_INET;
161 	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
162 	if (bind(ls, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
163 		warn("bind");
164 		close(ls);
165 		return (false);
166 	}
167 
168 	if (listen(ls, 1) == -1) {
169 		warn("listen");
170 		close(ls);
171 		return (false);
172 	}
173 
174 	len = sizeof(sin);
175 	if (getsockname(ls, (struct sockaddr *)&sin, &len) == -1) {
176 		warn("getsockname");
177 		close(ls);
178 		return (false);
179 	}
180 
181 	cs = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
182 	if (cs == -1) {
183 		warn("socket() for connect");
184 		close(ls);
185 		return (false);
186 	}
187 
188 	if (connect(cs, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
189 		if (errno != EINPROGRESS) {
190 			warn("connect");
191 			close(ls);
192 			close(cs);
193 			return (false);
194 		}
195 	}
196 
197 	as = accept4(ls, NULL, NULL, SOCK_NONBLOCK);
198 	if (as == -1) {
199 		warn("accept4");
200 		close(ls);
201 		close(cs);
202 		return (false);
203 	}
204 
205 	close(ls);
206 
207 	pfd.fd = cs;
208 	pfd.events = POLLOUT;
209 	pfd.revents = 0;
210 	ATF_REQUIRE_INTEQ(1, poll(&pfd, 1, INFTIM));
211 	ATF_REQUIRE_INTEQ(POLLOUT, pfd.revents);
212 
213 	sv[0] = cs;
214 	sv[1] = as;
215 	return (true);
216 }
217 
218 static bool
219 echo_socket(const atf_tc_t *tc, int sv[2])
220 {
221 	const char *cause, *host, *port;
222 	struct addrinfo hints, *ai, *tofree;
223 	int error, flags, s;
224 
225 	host = atf_tc_get_config_var(tc, "ktls.host");
226 	port = atf_tc_get_config_var_wd(tc, "ktls.port", "echo");
227 	memset(&hints, 0, sizeof(hints));
228 	hints.ai_family = AF_UNSPEC;
229 	hints.ai_socktype = SOCK_STREAM;
230 	hints.ai_protocol = IPPROTO_TCP;
231 	error = getaddrinfo(host, port, &hints, &tofree);
232 	if (error != 0) {
233 		warnx("getaddrinfo(%s:%s) failed: %s", host, port,
234 		    gai_strerror(error));
235 		return (false);
236 	}
237 
238 	cause = NULL;
239 	for (ai = tofree; ai != NULL; ai = ai->ai_next) {
240 		s = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
241 		if (s == -1) {
242 			cause = "socket";
243 			error = errno;
244 			continue;
245 		}
246 
247 		if (connect(s, ai->ai_addr, ai->ai_addrlen) == -1) {
248 			cause = "connect";
249 			error = errno;
250 			close(s);
251 			continue;
252 		}
253 
254 		freeaddrinfo(tofree);
255 
256 		ATF_REQUIRE((flags = fcntl(s, F_GETFL)) != -1);
257 		flags |= O_NONBLOCK;
258 		ATF_REQUIRE(fcntl(s, F_SETFL, flags) != -1);
259 
260 		sv[0] = s;
261 		sv[1] = s;
262 		return (true);
263 	}
264 
265 	warnc(error, "%s", cause);
266 	freeaddrinfo(tofree);
267 	return (false);
268 }
269 
270 static bool
271 open_sockets(const atf_tc_t *tc, int sv[2])
272 {
273 	if (atf_tc_has_config_var(tc, "ktls.host"))
274 		return (echo_socket(tc, sv));
275 	else
276 		return (socketpair_tcp(sv));
277 }
278 
279 static void
280 close_sockets(int sv[2])
281 {
282 	if (sv[0] != sv[1])
283 		ATF_REQUIRE(close(sv[1]) == 0);
284 	ATF_REQUIRE(close(sv[0]) == 0);
285 }
286 
287 static void
288 fd_set_blocking(int fd)
289 {
290 	int flags;
291 
292 	ATF_REQUIRE((flags = fcntl(fd, F_GETFL)) != -1);
293 	flags &= ~O_NONBLOCK;
294 	ATF_REQUIRE(fcntl(fd, F_SETFL, flags) != -1);
295 }
296 
297 static bool
298 cbc_crypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
299     const char *input, char *output, size_t size, int enc)
300 {
301 	EVP_CIPHER_CTX *ctx;
302 	int outl, total;
303 
304 	ctx = EVP_CIPHER_CTX_new();
305 	if (ctx == NULL) {
306 		warnx("EVP_CIPHER_CTX_new failed: %s",
307 		    ERR_error_string(ERR_get_error(), NULL));
308 		return (false);
309 	}
310 	if (EVP_CipherInit_ex(ctx, cipher, NULL, (const u_char *)key,
311 	    (const u_char *)iv, enc) != 1) {
312 		warnx("EVP_CipherInit_ex failed: %s",
313 		    ERR_error_string(ERR_get_error(), NULL));
314 		EVP_CIPHER_CTX_free(ctx);
315 		return (false);
316 	}
317 	EVP_CIPHER_CTX_set_padding(ctx, 0);
318 	if (EVP_CipherUpdate(ctx, (u_char *)output, &outl,
319 	    (const u_char *)input, size) != 1) {
320 		warnx("EVP_CipherUpdate failed: %s",
321 		    ERR_error_string(ERR_get_error(), NULL));
322 		EVP_CIPHER_CTX_free(ctx);
323 		return (false);
324 	}
325 	total = outl;
326 	if (EVP_CipherFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
327 		warnx("EVP_CipherFinal_ex failed: %s",
328 		    ERR_error_string(ERR_get_error(), NULL));
329 		EVP_CIPHER_CTX_free(ctx);
330 		return (false);
331 	}
332 	total += outl;
333 	if ((size_t)total != size) {
334 		warnx("cipher size mismatch: %zu vs %d", size, total);
335 		EVP_CIPHER_CTX_free(ctx);
336 		return (false);
337 	}
338 	EVP_CIPHER_CTX_free(ctx);
339 	return (true);
340 }
341 
342 static bool
343 cbc_encrypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
344     const char *input, char *output, size_t size)
345 {
346 	return (cbc_crypt(cipher, key, iv, input, output, size, 1));
347 }
348 
349 static bool
350 cbc_decrypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
351     const char *input, char *output, size_t size)
352 {
353 	return (cbc_crypt(cipher, key, iv, input, output, size, 0));
354 }
355 
356 static bool
357 compute_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad,
358     size_t aad_len, const void *buffer, size_t len, void *digest,
359     u_int *digest_len)
360 {
361 	HMAC_CTX *ctx;
362 
363 	ctx = HMAC_CTX_new();
364 	if (ctx == NULL) {
365 		warnx("HMAC_CTX_new failed: %s",
366 		    ERR_error_string(ERR_get_error(), NULL));
367 		return (false);
368 	}
369 	if (HMAC_Init_ex(ctx, key, key_len, md, NULL) != 1) {
370 		warnx("HMAC_Init_ex failed: %s",
371 		    ERR_error_string(ERR_get_error(), NULL));
372 		HMAC_CTX_free(ctx);
373 		return (false);
374 	}
375 	if (HMAC_Update(ctx, aad, aad_len) != 1) {
376 		warnx("HMAC_Update (aad) failed: %s",
377 		    ERR_error_string(ERR_get_error(), NULL));
378 		HMAC_CTX_free(ctx);
379 		return (false);
380 	}
381 	if (HMAC_Update(ctx, buffer, len) != 1) {
382 		warnx("HMAC_Update (payload) failed: %s",
383 		    ERR_error_string(ERR_get_error(), NULL));
384 		HMAC_CTX_free(ctx);
385 		return (false);
386 	}
387 	if (HMAC_Final(ctx, digest, digest_len) != 1) {
388 		warnx("HMAC_Final failed: %s",
389 		    ERR_error_string(ERR_get_error(), NULL));
390 		HMAC_CTX_free(ctx);
391 		return (false);
392 	}
393 	HMAC_CTX_free(ctx);
394 	return (true);
395 }
396 
397 static bool
398 verify_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad,
399     size_t aad_len, const void *buffer, size_t len, const void *digest)
400 {
401 	unsigned char digest2[EVP_MAX_MD_SIZE];
402 	u_int digest_len;
403 
404 	if (!compute_hash(md, key, key_len, aad, aad_len, buffer, len, digest2,
405 	    &digest_len))
406 		return (false);
407 	if (memcmp(digest, digest2, digest_len) != 0) {
408 		warnx("HMAC mismatch");
409 		return (false);
410 	}
411 	return (true);
412 }
413 
414 static bool
415 aead_encrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
416     const void *aad, size_t aad_len, const char *input, char *output,
417     size_t size, char *tag, size_t tag_len)
418 {
419 	EVP_CIPHER_CTX *ctx;
420 	int outl, total;
421 
422 	ctx = EVP_CIPHER_CTX_new();
423 	if (ctx == NULL) {
424 		warnx("EVP_CIPHER_CTX_new failed: %s",
425 		    ERR_error_string(ERR_get_error(), NULL));
426 		return (false);
427 	}
428 	if (EVP_EncryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
429 	    (const u_char *)nonce) != 1) {
430 		warnx("EVP_EncryptInit_ex failed: %s",
431 		    ERR_error_string(ERR_get_error(), NULL));
432 		EVP_CIPHER_CTX_free(ctx);
433 		return (false);
434 	}
435 	EVP_CIPHER_CTX_set_padding(ctx, 0);
436 	if (aad != NULL) {
437 		if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
438 		    aad_len) != 1) {
439 			warnx("EVP_EncryptUpdate for AAD failed: %s",
440 			    ERR_error_string(ERR_get_error(), NULL));
441 			EVP_CIPHER_CTX_free(ctx);
442 			return (false);
443 		}
444 	}
445 	if (EVP_EncryptUpdate(ctx, (u_char *)output, &outl,
446 	    (const u_char *)input, size) != 1) {
447 		warnx("EVP_EncryptUpdate failed: %s",
448 		    ERR_error_string(ERR_get_error(), NULL));
449 		EVP_CIPHER_CTX_free(ctx);
450 		return (false);
451 	}
452 	total = outl;
453 	if (EVP_EncryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
454 		warnx("EVP_EncryptFinal_ex failed: %s",
455 		    ERR_error_string(ERR_get_error(), NULL));
456 		EVP_CIPHER_CTX_free(ctx);
457 		return (false);
458 	}
459 	total += outl;
460 	if ((size_t)total != size) {
461 		warnx("encrypt size mismatch: %zu vs %d", size, total);
462 		EVP_CIPHER_CTX_free(ctx);
463 		return (false);
464 	}
465 	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tag_len, tag) !=
466 	    1) {
467 		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed: %s",
468 		    ERR_error_string(ERR_get_error(), NULL));
469 		EVP_CIPHER_CTX_free(ctx);
470 		return (false);
471 	}
472 	EVP_CIPHER_CTX_free(ctx);
473 	return (true);
474 }
475 
476 static bool
477 aead_decrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
478     const void *aad, size_t aad_len, const char *input, char *output,
479     size_t size, const char *tag, size_t tag_len)
480 {
481 	EVP_CIPHER_CTX *ctx;
482 	int outl, total;
483 	bool valid;
484 
485 	ctx = EVP_CIPHER_CTX_new();
486 	if (ctx == NULL) {
487 		warnx("EVP_CIPHER_CTX_new failed: %s",
488 		    ERR_error_string(ERR_get_error(), NULL));
489 		return (false);
490 	}
491 	if (EVP_DecryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
492 	    (const u_char *)nonce) != 1) {
493 		warnx("EVP_DecryptInit_ex failed: %s",
494 		    ERR_error_string(ERR_get_error(), NULL));
495 		EVP_CIPHER_CTX_free(ctx);
496 		return (false);
497 	}
498 	EVP_CIPHER_CTX_set_padding(ctx, 0);
499 	if (aad != NULL) {
500 		if (EVP_DecryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
501 		    aad_len) != 1) {
502 			warnx("EVP_DecryptUpdate for AAD failed: %s",
503 			    ERR_error_string(ERR_get_error(), NULL));
504 			EVP_CIPHER_CTX_free(ctx);
505 			return (false);
506 		}
507 	}
508 	if (EVP_DecryptUpdate(ctx, (u_char *)output, &outl,
509 	    (const u_char *)input, size) != 1) {
510 		warnx("EVP_DecryptUpdate failed: %s",
511 		    ERR_error_string(ERR_get_error(), NULL));
512 		EVP_CIPHER_CTX_free(ctx);
513 		return (false);
514 	}
515 	total = outl;
516 	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tag_len,
517 	    __DECONST(char *, tag)) != 1) {
518 		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed: %s",
519 		    ERR_error_string(ERR_get_error(), NULL));
520 		EVP_CIPHER_CTX_free(ctx);
521 		return (false);
522 	}
523 	valid = (EVP_DecryptFinal_ex(ctx, (u_char *)output + outl, &outl) == 1);
524 	total += outl;
525 	if ((size_t)total != size) {
526 		warnx("decrypt size mismatch: %zu vs %d", size, total);
527 		EVP_CIPHER_CTX_free(ctx);
528 		return (false);
529 	}
530 	if (!valid)
531 		warnx("tag mismatch");
532 	EVP_CIPHER_CTX_free(ctx);
533 	return (valid);
534 }
535 
536 static void
537 build_tls_enable(const atf_tc_t *tc, int cipher_alg, size_t cipher_key_len,
538     int auth_alg, int minor, uint64_t seqno, struct tls_enable *en)
539 {
540 	u_int auth_key_len, iv_len;
541 
542 	memset(en, 0, sizeof(*en));
543 
544 	switch (cipher_alg) {
545 	case CRYPTO_AES_CBC:
546 		if (minor == TLS_MINOR_VER_ZERO)
547 			iv_len = AES_BLOCK_LEN;
548 		else
549 			iv_len = 0;
550 		break;
551 	case CRYPTO_AES_NIST_GCM_16:
552 		if (minor == TLS_MINOR_VER_TWO)
553 			iv_len = TLS_AEAD_GCM_LEN;
554 		else
555 			iv_len = TLS_1_3_GCM_IV_LEN;
556 		break;
557 	case CRYPTO_CHACHA20_POLY1305:
558 		iv_len = TLS_CHACHA20_IV_LEN;
559 		break;
560 	default:
561 		iv_len = 0;
562 		break;
563 	}
564 	switch (auth_alg) {
565 	case CRYPTO_SHA1_HMAC:
566 		auth_key_len = SHA1_HASH_LEN;
567 		break;
568 	case CRYPTO_SHA2_256_HMAC:
569 		auth_key_len = SHA2_256_HASH_LEN;
570 		break;
571 	case CRYPTO_SHA2_384_HMAC:
572 		auth_key_len = SHA2_384_HASH_LEN;
573 		break;
574 	default:
575 		auth_key_len = 0;
576 		break;
577 	}
578 	en->cipher_key = alloc_buffer(cipher_key_len);
579 	debug_hexdump(tc, en->cipher_key, cipher_key_len, "cipher key");
580 	en->iv = alloc_buffer(iv_len);
581 	if (iv_len != 0)
582 		debug_hexdump(tc, en->iv, iv_len, "iv");
583 	en->auth_key = alloc_buffer(auth_key_len);
584 	if (auth_key_len != 0)
585 		debug_hexdump(tc, en->auth_key, auth_key_len, "auth key");
586 	en->cipher_algorithm = cipher_alg;
587 	en->cipher_key_len = cipher_key_len;
588 	en->iv_len = iv_len;
589 	en->auth_algorithm = auth_alg;
590 	en->auth_key_len = auth_key_len;
591 	en->tls_vmajor = TLS_MAJOR_VER_ONE;
592 	en->tls_vminor = minor;
593 	be64enc(en->rec_seq, seqno);
594 	debug(tc, "seqno: %ju\n", (uintmax_t)seqno);
595 }
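
/*
 * A minimal usage sketch (assuming a TLS 1.3 AES-128-GCM suite whose first
 * record uses sequence number 0); free_tls_enable() below releases the
 * keying material that build_tls_enable() allocates:
 *
 *	struct tls_enable en;
 *
 *	build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,
 *	    TLS_MINOR_VER_THREE, 0, &en);
 *	...
 *	free_tls_enable(&en);
 */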
596 
597 static void
598 free_tls_enable(struct tls_enable *en)
599 {
600 	free(__DECONST(void *, en->cipher_key));
601 	free(__DECONST(void *, en->iv));
602 	free(__DECONST(void *, en->auth_key));
603 }
604 
605 static const EVP_CIPHER *
606 tls_EVP_CIPHER(const struct tls_enable *en)
607 {
608 	switch (en->cipher_algorithm) {
609 	case CRYPTO_AES_CBC:
610 		switch (en->cipher_key_len) {
611 		case 128 / 8:
612 			return (EVP_aes_128_cbc());
613 		case 256 / 8:
614 			return (EVP_aes_256_cbc());
615 		default:
616 			return (NULL);
617 		}
618 		break;
619 	case CRYPTO_AES_NIST_GCM_16:
620 		switch (en->cipher_key_len) {
621 		case 128 / 8:
622 			return (EVP_aes_128_gcm());
623 		case 256 / 8:
624 			return (EVP_aes_256_gcm());
625 		default:
626 			return (NULL);
627 		}
628 		break;
629 	case CRYPTO_CHACHA20_POLY1305:
630 		return (EVP_chacha20_poly1305());
631 	default:
632 		return (NULL);
633 	}
634 }
635 
636 static const EVP_MD *
637 tls_EVP_MD(const struct tls_enable *en)
638 {
639 	switch (en->auth_algorithm) {
640 	case CRYPTO_SHA1_HMAC:
641 		return (EVP_sha1());
642 	case CRYPTO_SHA2_256_HMAC:
643 		return (EVP_sha256());
644 	case CRYPTO_SHA2_384_HMAC:
645 		return (EVP_sha384());
646 	default:
647 		return (NULL);
648 	}
649 }
650 
651 static size_t
652 tls_header_len(struct tls_enable *en)
653 {
654 	size_t len;
655 
656 	len = sizeof(struct tls_record_layer);
657 	switch (en->cipher_algorithm) {
658 	case CRYPTO_AES_CBC:
659 		if (en->tls_vminor != TLS_MINOR_VER_ZERO)
660 			len += AES_BLOCK_LEN;
661 		return (len);
662 	case CRYPTO_AES_NIST_GCM_16:
663 		if (en->tls_vminor == TLS_MINOR_VER_TWO)
664 			len += sizeof(uint64_t);
665 		return (len);
666 	case CRYPTO_CHACHA20_POLY1305:
667 		return (len);
668 	default:
669 		return (0);
670 	}
671 }
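
/*
 * For reference, the header sizes computed above are the standard TLS ones:
 * a 5-byte record header, plus a 16-byte explicit CBC IV for AES-CBC on
 * TLS 1.1/1.2, plus an 8-byte explicit nonce for AES-GCM on TLS 1.2.
 * TLS 1.0 CBC and the TLS 1.3/ChaCha20 suites carry nothing beyond the
 * 5-byte header.
 */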
672 
673 static size_t
674 tls_mac_len(struct tls_enable *en)
675 {
676 	switch (en->cipher_algorithm) {
677 	case CRYPTO_AES_CBC:
678 		switch (en->auth_algorithm) {
679 		case CRYPTO_SHA1_HMAC:
680 			return (SHA1_HASH_LEN);
681 		case CRYPTO_SHA2_256_HMAC:
682 			return (SHA2_256_HASH_LEN);
683 		case CRYPTO_SHA2_384_HMAC:
684 			return (SHA2_384_HASH_LEN);
685 		default:
686 			return (0);
687 		}
688 	case CRYPTO_AES_NIST_GCM_16:
689 		return (AES_GMAC_HASH_LEN);
690 	case CRYPTO_CHACHA20_POLY1305:
691 		return (POLY1305_HASH_LEN);
692 	default:
693 		return (0);
694 	}
695 }
696 
697 /* Includes maximum padding for MTE. */
698 static size_t
699 tls_trailer_len(struct tls_enable *en)
700 {
701 	size_t len;
702 
703 	len = tls_mac_len(en);
704 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
705 		len += AES_BLOCK_LEN;
706 	if (en->tls_vminor == TLS_MINOR_VER_THREE)
707 		len++;
708 	return (len);
709 }
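
/*
 * Worked examples of the trailer sizes above: AES-CBC with HMAC-SHA1
 * reserves 20 bytes of MAC plus a full 16-byte block of worst-case padding
 * (36 bytes); AES-GCM on TLS 1.3 reserves the 16-byte tag plus one byte for
 * the inner record type (17 bytes).
 */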
710 
711 /* Minimum valid record payload size for a given cipher suite. */
712 static size_t
713 tls_minimum_record_payload(struct tls_enable *en)
714 {
715 	size_t len;
716 
717 	len = tls_header_len(en);
718 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
719 		len += roundup2(tls_mac_len(en) + 1, AES_BLOCK_LEN);
720 	else
721 		len += tls_mac_len(en);
722 	if (en->tls_vminor == TLS_MINOR_VER_THREE)
723 		len++;
724 	return (len - sizeof(struct tls_record_layer));
725 }
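
/*
 * For example, with AES-CBC + HMAC-SHA1 on TLS 1.0 the minimum payload is
 * roundup2(20 + 1, 16) = 32 bytes; on TLS 1.2 the 16-byte explicit IV
 * brings it to 48.  For AES-GCM on TLS 1.3 it is 16 (tag) + 1 (record
 * type) = 17 bytes.
 */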
726 
727 /* 'len' is the length of the payload application data. */
728 static void
729 tls_mte_aad(struct tls_enable *en, size_t len,
730     const struct tls_record_layer *hdr, uint64_t seqno, struct tls_mac_data *ad)
731 {
732 	ad->seq = htobe64(seqno);
733 	ad->type = hdr->tls_type;
734 	ad->tls_vmajor = hdr->tls_vmajor;
735 	ad->tls_vminor = hdr->tls_vminor;
736 	ad->tls_length = htons(len);
737 }
738 
739 static void
740 tls_12_aead_aad(struct tls_enable *en, size_t len,
741     const struct tls_record_layer *hdr, uint64_t seqno,
742     struct tls_aead_data *ad)
743 {
744 	ad->seq = htobe64(seqno);
745 	ad->type = hdr->tls_type;
746 	ad->tls_vmajor = hdr->tls_vmajor;
747 	ad->tls_vminor = hdr->tls_vminor;
748 	ad->tls_length = htons(len);
749 }
750 
751 static void
752 tls_13_aad(struct tls_enable *en, const struct tls_record_layer *hdr,
753     uint64_t seqno, struct tls_aead_data_13 *ad)
754 {
755 	ad->type = hdr->tls_type;
756 	ad->tls_vmajor = hdr->tls_vmajor;
757 	ad->tls_vminor = hdr->tls_vminor;
758 	ad->tls_length = hdr->tls_length;
759 }
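
/*
 * The additional authenticated data built above follows the RFCs: for
 * MAC-then-encrypt and TLS 1.2 AEAD it is the 8-byte sequence number
 * followed by the record type, protocol version, and plaintext length; for
 * TLS 1.3 it is simply the 5-byte record header as sent on the wire (whose
 * length covers the ciphertext and tag).
 */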
760 
761 static void
762 tls_12_gcm_nonce(struct tls_enable *en, const struct tls_record_layer *hdr,
763     char *nonce)
764 {
765 	memcpy(nonce, en->iv, TLS_AEAD_GCM_LEN);
766 	memcpy(nonce + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
767 }
768 
769 static void
770 tls_13_nonce(struct tls_enable *en, uint64_t seqno, char *nonce)
771 {
772 	static_assert(TLS_1_3_GCM_IV_LEN == TLS_CHACHA20_IV_LEN,
773 	    "TLS 1.3 nonce length mismatch");
774 	memcpy(nonce, en->iv, TLS_1_3_GCM_IV_LEN);
775 	*(uint64_t *)(nonce + 4) ^= htobe64(seqno);
776 }
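
/*
 * Nonce construction, for reference: TLS 1.2 AES-GCM concatenates the
 * 4-byte implicit salt (en->iv here) with the 8-byte explicit nonce carried
 * in each record (these tests use the sequence number); TLS 1.3, and
 * ChaCha20-Poly1305 on TLS 1.2, instead XOR the 64-bit sequence number into
 * the low 8 bytes of the 12-byte static IV.
 */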
777 
778 /*
779  * Decrypt a TLS record 'len' bytes long at 'src' and store the result at
780  * 'dst'.  If the TLS record header is inconsistent with 'len' or 'dst'
781  * doesn't have sufficient room ('avail'), fail the test.
782  */
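/*
 * On the wire an AES-CBC MAC-then-encrypt record looks like:
 *
 *	5-byte header | explicit IV (TLS 1.1+) | CBC(plaintext | MAC | padding)
 *
 * For TLS 1.0 there is no explicit IV; the first record is chained off the
 * IV supplied at setup and later records off the last ciphertext block of
 * the previous record, which is why the code below saves that block back
 * into en->iv.
 */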
783 static size_t
784 decrypt_tls_aes_cbc_mte(const atf_tc_t *tc, struct tls_enable *en,
785     uint64_t seqno, const void *src, size_t len, void *dst, size_t avail,
786     uint8_t *record_type)
787 {
788 	const struct tls_record_layer *hdr;
789 	struct tls_mac_data aad;
790 	const char *iv;
791 	char *buf;
792 	size_t hdr_len, mac_len, payload_len;
793 	int padding;
794 
795 	hdr = src;
796 	hdr_len = tls_header_len(en);
797 	mac_len = tls_mac_len(en);
798 	ATF_REQUIRE_INTEQ(TLS_MAJOR_VER_ONE, hdr->tls_vmajor);
799 	ATF_REQUIRE_INTEQ(en->tls_vminor, hdr->tls_vminor);
800 	debug(tc, "decrypting MTE record seqno %ju:\n", (uintmax_t)seqno);
801 	debug_hexdump(tc, src, len, NULL);
802 
803 	/* First, decrypt the outer payload into a temporary buffer. */
804 	payload_len = len - hdr_len;
805 	buf = malloc(payload_len);
806 	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
807 		iv = en->iv;
808 	else
809 		iv = (void *)(hdr + 1);
810 	debug_hexdump(tc, iv, AES_BLOCK_LEN, "iv");
811 	ATF_REQUIRE(cbc_decrypt(tls_EVP_CIPHER(en), en->cipher_key, iv,
812 	    (const u_char *)src + hdr_len, buf, payload_len));
813 	debug_hexdump(tc, buf, payload_len, "decrypted buffer");
814 
815 	/*
816 	 * Copy the last encrypted block to use as the IV for the next
817 	 * record for TLS 1.0.
818 	 */
819 	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
820 		memcpy(__DECONST(uint8_t *, en->iv), (const u_char *)src +
821 		    (len - AES_BLOCK_LEN), AES_BLOCK_LEN);
822 
823 	/*
824 	 * Verify trailing padding and strip.
825 	 *
826 	 * The kernel always generates the smallest amount of padding.
827 	 */
828 	padding = buf[payload_len - 1] + 1;
829 	ATF_REQUIRE_MSG(padding > 0 && padding <= AES_BLOCK_LEN,
830 	    "invalid padding %d", padding);
831 	ATF_REQUIRE_MSG(payload_len >= mac_len + padding,
832 	    "payload_len (%zu) < mac_len (%zu) + padding (%d)", payload_len,
833 	    mac_len, padding);
834 	payload_len -= padding;
835 
836 	/* Verify HMAC. */
837 	payload_len -= mac_len;
838 	tls_mte_aad(en, payload_len, hdr, seqno, &aad);
839 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
840 	ATF_REQUIRE(verify_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len,
841 	    &aad, sizeof(aad), buf, payload_len, buf + payload_len));
842 
843 	ATF_REQUIRE_MSG(payload_len <= avail, "payload_len (%zu) > avail (%zu)",
844 	    payload_len, avail);
845 	memcpy(dst, buf, payload_len);
846 	*record_type = hdr->tls_type;
847 	return (payload_len);
848 }
849 
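/*
 * TLS 1.2 AEAD (AES-GCM) records carry an 8-byte explicit nonce between the
 * 5-byte header and the ciphertext, followed by a 16-byte tag.
 * ChaCha20-Poly1305 has no explicit nonce and derives it from the sequence
 * number, which is why tls_13_nonce() is reused here.
 */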
850 static size_t
851 decrypt_tls_12_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
852     const void *src, size_t len, void *dst, uint8_t *record_type)
853 {
854 	const struct tls_record_layer *hdr;
855 	struct tls_aead_data aad;
856 	char nonce[12];
857 	size_t hdr_len, mac_len, payload_len;
858 
859 	hdr = src;
860 
861 	hdr_len = tls_header_len(en);
862 	mac_len = tls_mac_len(en);
863 	payload_len = len - (hdr_len + mac_len);
864 	ATF_REQUIRE_INTEQ(TLS_MAJOR_VER_ONE, hdr->tls_vmajor);
865 	ATF_REQUIRE_INTEQ(TLS_MINOR_VER_TWO, hdr->tls_vminor);
866 	debug(tc, "decrypting TLS 1.2 record seqno %ju:\n", (uintmax_t)seqno);
867 	debug_hexdump(tc, src, len, NULL);
868 
869 	tls_12_aead_aad(en, payload_len, hdr, seqno, &aad);
870 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
871 	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
872 		tls_12_gcm_nonce(en, hdr, nonce);
873 	else
874 		tls_13_nonce(en, seqno, nonce);
875 	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");
876 
877 	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
878 	    &aad, sizeof(aad), (const char *)src + hdr_len, dst, payload_len,
879 	    (const char *)src + hdr_len + payload_len, mac_len));
880 
881 	*record_type = hdr->tls_type;
882 	return (payload_len);
883 }
884 
885 static size_t
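/*
 * TLS 1.3 records always claim to be TLS 1.2 application data in the
 * header; the real record type is the last byte of the inner plaintext:
 *
 *	5-byte header | AEAD(plaintext | record type | zero padding) | tag
 */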
886 decrypt_tls_13_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
887     const void *src, size_t len, void *dst, uint8_t *record_type)
888 {
889 	const struct tls_record_layer *hdr;
890 	struct tls_aead_data_13 aad;
891 	char nonce[12];
892 	char *buf;
893 	size_t hdr_len, mac_len, payload_len;
894 
895 	hdr = src;
896 
897 	hdr_len = tls_header_len(en);
898 	mac_len = tls_mac_len(en);
899 	payload_len = len - (hdr_len + mac_len);
900 	ATF_REQUIRE_MSG(payload_len >= 1,
901 	    "payload_len (%zu) too short: len %zu hdr_len %zu mac_len %zu",
902 	    payload_len, len, hdr_len, mac_len);
903 	ATF_REQUIRE_INTEQ(TLS_RLTYPE_APP, hdr->tls_type);
904 	ATF_REQUIRE_INTEQ(TLS_MAJOR_VER_ONE, hdr->tls_vmajor);
905 	ATF_REQUIRE_INTEQ(TLS_MINOR_VER_TWO, hdr->tls_vminor);
906 	debug(tc, "decrypting TLS 1.3 record seqno %ju:\n", (uintmax_t)seqno);
907 	debug_hexdump(tc, src, len, NULL);
908 
909 	tls_13_aad(en, hdr, seqno, &aad);
910 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
911 	tls_13_nonce(en, seqno, nonce);
912 	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");
913 
914 	/*
915 	 * Have to use a temporary buffer for the output since the real
916 	 * record type is carried as the last byte of the decrypted payload.
917 	 */
918 	buf = malloc(payload_len);
919 
920 	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
921 	    &aad, sizeof(aad), (const char *)src + hdr_len, buf, payload_len,
922 	    (const char *)src + hdr_len + payload_len, mac_len));
923 	debug_hexdump(tc, buf, payload_len, "decrypted buffer");
924 
925 	/* Trim record type. */
926 	*record_type = buf[payload_len - 1];
927 	payload_len--;
928 
929 	memcpy(dst, buf, payload_len);
930 	free(buf);
931 
932 	return (payload_len);
933 }
934 
935 static size_t
936 decrypt_tls_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
937     const void *src, size_t len, void *dst, size_t avail, uint8_t *record_type)
938 {
939 	const struct tls_record_layer *hdr;
940 	size_t payload_len;
941 
942 	hdr = src;
943 	ATF_REQUIRE_INTEQ(len, ntohs(hdr->tls_length) + sizeof(*hdr));
944 
945 	payload_len = len - (tls_header_len(en) + tls_trailer_len(en));
946 	ATF_REQUIRE_MSG(payload_len <= avail, "payload_len (%zu) > avail (%zu)",
947 	    payload_len, avail);
948 
949 	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
950 		ATF_REQUIRE_INTEQ(payload_len, decrypt_tls_12_aead(tc, en,
951 		    seqno, src, len, dst, record_type));
952 	} else {
953 		ATF_REQUIRE_INTEQ(payload_len, decrypt_tls_13_aead(tc, en,
954 		    seqno, src, len, dst, record_type));
955 	}
956 
957 	return (payload_len);
958 }
959 
960 static size_t
961 decrypt_tls_record(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
962     const void *src, size_t len, void *dst, size_t avail, uint8_t *record_type)
963 {
964 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
965 		return (decrypt_tls_aes_cbc_mte(tc, en, seqno, src, len, dst,
966 		    avail, record_type));
967 	else
968 		return (decrypt_tls_aead(tc, en, seqno, src, len, dst, avail,
969 		    record_type));
970 }
971 
972 /*
973  * Encrypt a TLS record of type 'record_type' whose 'len'-byte payload
974  * is at 'src' and store the result at 'dst'.  If 'dst' doesn't have
975  * sufficient room ('avail'), fail the test.  'padding' is the amount
976  * of additional padding to include beyond any amount mandated by the
977  * cipher suite.
978  */
979 static size_t
980 encrypt_tls_aes_cbc_mte(const atf_tc_t *tc, struct tls_enable *en,
981     uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
982     size_t avail, size_t padding)
983 {
984 	struct tls_record_layer *hdr;
985 	struct tls_mac_data aad;
986 	char *buf, *iv;
987 	size_t hdr_len, mac_len, record_len;
988 	u_int digest_len, i;
989 
990 	ATF_REQUIRE_INTEQ(0, padding % 16);
991 
992 	hdr = dst;
993 	buf = dst;
994 
995 	debug(tc, "encrypting MTE record seqno %ju:\n", (uintmax_t)seqno);
996 	hdr_len = tls_header_len(en);
997 	mac_len = tls_mac_len(en);
998 	padding += (AES_BLOCK_LEN - (len + mac_len) % AES_BLOCK_LEN);
999 	ATF_REQUIRE_MSG(padding > 0 && padding <= 255, "invalid padding (%zu)",
1000 	    padding);
1001 
1002 	record_len = hdr_len + len + mac_len + padding;
1003 	ATF_REQUIRE_MSG(record_len <= avail, "record_len (%zu) > avail (%zu): "
1004 	    "hdr_len %zu, len %zu, mac_len %zu, padding %zu", record_len,
1005 	    avail, hdr_len, len, mac_len, padding);
1006 
1007 	hdr->tls_type = record_type;
1008 	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
1009 	hdr->tls_vminor = en->tls_vminor;
1010 	hdr->tls_length = htons(record_len - sizeof(*hdr));
1011 	iv = (char *)(hdr + 1);
1012 	for (i = 0; i < AES_BLOCK_LEN; i++)
1013 		iv[i] = rdigit();
1014 	debug_hexdump(tc, iv, AES_BLOCK_LEN, "explicit IV");
1015 
1016 	/* Copy plaintext to ciphertext region. */
1017 	memcpy(buf + hdr_len, src, len);
1018 
1019 	/* Compute HMAC. */
1020 	tls_mte_aad(en, len, hdr, seqno, &aad);
1021 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
1022 	debug_hexdump(tc, src, len, "plaintext");
1023 	ATF_REQUIRE(compute_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len,
1024 	    &aad, sizeof(aad), src, len, buf + hdr_len + len, &digest_len));
1025 	ATF_REQUIRE_INTEQ(mac_len, digest_len);
1026 
1027 	/* Store padding. */
1028 	for (i = 0; i < padding; i++)
1029 		buf[hdr_len + len + mac_len + i] = padding - 1;
1030 	debug_hexdump(tc, buf + hdr_len + len, mac_len + padding,
1031 	    "MAC and padding");
1032 
1033 	/* Encrypt the record. */
1034 	ATF_REQUIRE(cbc_encrypt(tls_EVP_CIPHER(en), en->cipher_key, iv,
1035 	    buf + hdr_len, buf + hdr_len, len + mac_len + padding));
1036 	debug_hexdump(tc, dst, record_len, "encrypted record");
1037 
1038 	return (record_len);
1039 }
1040 
1041 static size_t
1042 encrypt_tls_12_aead(const atf_tc_t *tc, struct tls_enable *en,
1043     uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst)
1044 {
1045 	struct tls_record_layer *hdr;
1046 	struct tls_aead_data aad;
1047 	char nonce[12];
1048 	size_t hdr_len, mac_len, record_len;
1049 
1050 	hdr = dst;
1051 
1052 	debug(tc, "encrypting TLS 1.2 record seqno %ju:\n", (uintmax_t)seqno);
1053 	hdr_len = tls_header_len(en);
1054 	mac_len = tls_mac_len(en);
1055 	record_len = hdr_len + len + mac_len;
1056 
1057 	hdr->tls_type = record_type;
1058 	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
1059 	hdr->tls_vminor = TLS_MINOR_VER_TWO;
1060 	hdr->tls_length = htons(record_len - sizeof(*hdr));
1061 	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
1062 		memcpy(hdr + 1, &seqno, sizeof(seqno));
1063 
1064 	tls_12_aead_aad(en, len, hdr, seqno, &aad);
1065 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
1066 	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
1067 		tls_12_gcm_nonce(en, hdr, nonce);
1068 	else
1069 		tls_13_nonce(en, seqno, nonce);
1070 	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");
1071 
1072 	debug_hexdump(tc, src, len, "plaintext");
1073 	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
1074 	    &aad, sizeof(aad), src, (char *)dst + hdr_len, len,
1075 	    (char *)dst + hdr_len + len, mac_len));
1076 	debug_hexdump(tc, dst, record_len, "encrypted record");
1077 
1078 	return (record_len);
1079 }
1080 
1081 static size_t
1082 encrypt_tls_13_aead(const atf_tc_t *tc, struct tls_enable *en,
1083     uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
1084     size_t padding)
1085 {
1086 	struct tls_record_layer *hdr;
1087 	struct tls_aead_data_13 aad;
1088 	char nonce[12];
1089 	char *buf;
1090 	size_t hdr_len, mac_len, record_len;
1091 
1092 	hdr = dst;
1093 
1094 	debug(tc, "encrypting TLS 1.3 record seqno %ju:\n", (uintmax_t)seqno);
1095 	hdr_len = tls_header_len(en);
1096 	mac_len = tls_mac_len(en);
1097 	record_len = hdr_len + len + 1 + padding + mac_len;
1098 
1099 	hdr->tls_type = TLS_RLTYPE_APP;
1100 	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
1101 	hdr->tls_vminor = TLS_MINOR_VER_TWO;
1102 	hdr->tls_length = htons(record_len - sizeof(*hdr));
1103 
1104 	tls_13_aad(en, hdr, seqno, &aad);
1105 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
1106 	tls_13_nonce(en, seqno, nonce);
1107 	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");
1108 
1109 	/*
1110 	 * Have to use a temporary buffer for the input so that the record
1111 	 * type can be appended.
1112 	 */
1113 	buf = malloc(len + 1 + padding);
1114 	memcpy(buf, src, len);
1115 	buf[len] = record_type;
1116 	memset(buf + len + 1, 0, padding);
1117 	debug_hexdump(tc, buf, len + 1 + padding, "plaintext + type + padding");
1118 
1119 	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
1120 	    &aad, sizeof(aad), buf, (char *)dst + hdr_len, len + 1 + padding,
1121 	    (char *)dst + hdr_len + len + 1 + padding, mac_len));
1122 	debug_hexdump(tc, dst, record_len, "encrypted record");
1123 
1124 	free(buf);
1125 
1126 	return (record_len);
1127 }
1128 
1129 static size_t
1130 encrypt_tls_aead(const atf_tc_t *tc, struct tls_enable *en,
1131     uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
1132     size_t avail, size_t padding)
1133 {
1134 	size_t record_len;
1135 
1136 	record_len = tls_header_len(en) + len + padding + tls_trailer_len(en);
1137 	ATF_REQUIRE_MSG(record_len <= avail, "record_len (%zu) > avail (%zu): "
1138 	    "header %zu len %zu padding %zu trailer %zu", record_len, avail,
1139 	    tls_header_len(en), len, padding, tls_trailer_len(en));
1140 
1141 	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
1142 		ATF_REQUIRE_INTEQ(0, padding);
1143 		ATF_REQUIRE_INTEQ(record_len, encrypt_tls_12_aead(tc, en,
1144 		    record_type, seqno, src, len, dst));
1145 	} else
1146 		ATF_REQUIRE_INTEQ(record_len, encrypt_tls_13_aead(tc, en,
1147 		    record_type, seqno, src, len, dst, padding));
1148 
1149 	return (record_len);
1150 }
1151 
1152 static size_t
1153 encrypt_tls_record(const atf_tc_t *tc, struct tls_enable *en,
1154     uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
1155     size_t avail, size_t padding)
1156 {
1157 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
1158 		return (encrypt_tls_aes_cbc_mte(tc, en, record_type, seqno, src,
1159 		    len, dst, avail, padding));
1160 	else
1161 		return (encrypt_tls_aead(tc, en, record_type, seqno, src, len,
1162 		    dst, avail, padding));
1163 }
1164 
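/*
 * Transmit tests: enable TLS transmit offload (TCP_TXTLS_ENABLE) on one
 * side of a connected pair, write plaintext through the offloaded socket,
 * then read the resulting records on the peer and decrypt them in userland
 * with the routines above to verify the kernel's output.
 */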
1165 static void
1166 test_ktls_transmit_app_data(const atf_tc_t *tc, struct tls_enable *en,
1167     uint64_t seqno, size_t len)
1168 {
1169 	struct kevent ev;
1170 	struct tls_record_layer *hdr;
1171 	char *plaintext, *decrypted, *outbuf;
1172 	size_t decrypted_len, outbuf_len, outbuf_cap, record_len, written;
1173 	ssize_t rv;
1174 	int kq, sockets[2];
1175 	uint8_t record_type;
1176 
1177 	plaintext = alloc_buffer(len);
1178 	debug_hexdump(tc, plaintext, len, "plaintext");
1179 	decrypted = malloc(len);
1180 	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
1181 	    tls_trailer_len(en);
1182 	outbuf = malloc(outbuf_cap);
1183 	hdr = (struct tls_record_layer *)outbuf;
1184 
1185 	ATF_REQUIRE((kq = kqueue()) != -1);
1186 
1187 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1188 
1189 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
1190 	    sizeof(*en)) == 0);
1191 	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);
1192 
1193 	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
1194 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
1195 	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
1196 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
1197 
1198 	decrypted_len = 0;
1199 	outbuf_len = 0;
1200 	written = 0;
1201 
1202 	while (decrypted_len != len) {
1203 		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);
1204 
1205 		switch (ev.filter) {
1206 		case EVFILT_WRITE:
1207 			/* Try to write any remaining data. */
1208 			rv = write(ev.ident, plaintext + written,
1209 			    len - written);
1210 			ATF_REQUIRE_MSG(rv > 0,
1211 			    "failed to write to socket");
1212 			written += rv;
1213 			if (written == len) {
1214 				ev.flags = EV_DISABLE;
1215 				ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
1216 				    NULL) == 0);
1217 			}
1218 			break;
1219 
1220 		case EVFILT_READ:
1221 			ATF_REQUIRE((ev.flags & EV_EOF) == 0);
1222 
1223 			/*
1224 			 * Try to read data for the next TLS record
1225 			 * into outbuf.  Start by reading the header
1226 			 * to determine how much additional data to
1227 			 * read.
1228 			 */
1229 			if (outbuf_len < sizeof(struct tls_record_layer)) {
1230 				rv = read(ev.ident, outbuf + outbuf_len,
1231 				    sizeof(struct tls_record_layer) -
1232 				    outbuf_len);
1233 				ATF_REQUIRE_MSG(rv > 0,
1234 				    "failed to read from socket");
1235 				outbuf_len += rv;
1236 
1237 				if (outbuf_len ==
1238 				    sizeof(struct tls_record_layer)) {
1239 					debug(tc, "TLS header for seqno %ju:\n",
1240 					    (uintmax_t)seqno);
1241 					debug_hexdump(tc, outbuf, outbuf_len,
1242 					    NULL);
1243 				}
1244 			}
1245 
1246 			if (outbuf_len < sizeof(struct tls_record_layer))
1247 				break;
1248 
1249 			record_len = sizeof(struct tls_record_layer) +
1250 			    ntohs(hdr->tls_length);
1251 			debug(tc, "record_len %zu outbuf_cap %zu\n",
1252 			    record_len, outbuf_cap);
1253 			ATF_REQUIRE(record_len <= outbuf_cap);
1254 			ATF_REQUIRE(record_len > outbuf_len);
1255 			rv = read(ev.ident, outbuf + outbuf_len,
1256 			    record_len - outbuf_len);
1257 			if (rv == -1 && errno == EAGAIN)
1258 				break;
1259 			ATF_REQUIRE_MSG(rv > 0,
1260 			    "failed to read from socket: %s", strerror(errno));
1261 
1262 			outbuf_len += rv;
1263 			if (outbuf_len == record_len) {
1264 				decrypted_len += decrypt_tls_record(tc, en,
1265 				    seqno, outbuf, outbuf_len,
1266 				    decrypted + decrypted_len,
1267 				    len - decrypted_len, &record_type);
1268 				ATF_REQUIRE_INTEQ(TLS_RLTYPE_APP, record_type);
1269 
1270 				seqno++;
1271 				outbuf_len = 0;
1272 			}
1273 			break;
1274 		}
1275 	}
1276 
1277 	ATF_REQUIRE_MSG(written == decrypted_len,
1278 	    "read %zu decrypted bytes, but wrote %zu", decrypted_len, written);
1279 
1280 	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);
1281 
1282 	free(outbuf);
1283 	free(decrypted);
1284 	free(plaintext);
1285 
1286 	close_sockets(sockets);
1287 	ATF_REQUIRE(close(kq) == 0);
1288 }
1289 
1290 static void
1291 ktls_send_control_message(int fd, uint8_t type, void *data, size_t len)
1292 {
1293 	struct msghdr msg;
1294 	struct cmsghdr *cmsg;
1295 	char cbuf[CMSG_SPACE(sizeof(type))];
1296 	struct iovec iov;
1297 
1298 	memset(&msg, 0, sizeof(msg));
1299 
1300 	msg.msg_control = cbuf;
1301 	msg.msg_controllen = sizeof(cbuf);
1302 	cmsg = CMSG_FIRSTHDR(&msg);
1303 	cmsg->cmsg_level = IPPROTO_TCP;
1304 	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
1305 	cmsg->cmsg_len = CMSG_LEN(sizeof(type));
1306 	*(uint8_t *)CMSG_DATA(cmsg) = type;
1307 
1308 	iov.iov_base = data;
1309 	iov.iov_len = len;
1310 	msg.msg_iov = &iov;
1311 	msg.msg_iovlen = 1;
1312 
1313 	ATF_REQUIRE_INTEQ((ssize_t)len, sendmsg(fd, &msg, 0));
1314 }
1315 
1316 static void
1317 test_ktls_transmit_control(const atf_tc_t *tc, struct tls_enable *en,
1318     uint64_t seqno, uint8_t type, size_t len)
1319 {
1320 	struct tls_record_layer *hdr;
1321 	char *plaintext, *decrypted, *outbuf;
1322 	size_t outbuf_cap, payload_len, record_len;
1323 	ssize_t rv;
1324 	int sockets[2];
1325 	uint8_t record_type;
1326 
1327 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1328 
1329 	plaintext = alloc_buffer(len);
1330 	decrypted = malloc(len);
1331 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1332 	outbuf = malloc(outbuf_cap);
1333 	hdr = (struct tls_record_layer *)outbuf;
1334 
1335 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1336 
1337 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
1338 	    sizeof(*en)) == 0);
1339 	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);
1340 
1341 	fd_set_blocking(sockets[0]);
1342 	fd_set_blocking(sockets[1]);
1343 
1344 	ktls_send_control_message(sockets[1], type, plaintext, len);
1345 
1346 	/*
1347 	 * First read the header to determine how much additional data
1348 	 * to read.
1349 	 */
1350 	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
1351 	ATF_REQUIRE_INTEQ(sizeof(struct tls_record_layer), rv);
1352 	payload_len = ntohs(hdr->tls_length);
1353 	record_len = payload_len + sizeof(struct tls_record_layer);
1354 	ATF_REQUIRE_MSG(record_len <= outbuf_cap,
1355 	    "record_len (%zu) > outbuf_cap (%zu)", record_len, outbuf_cap);
1356 	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
1357 	    payload_len);
1358 	ATF_REQUIRE_INTEQ((ssize_t)payload_len, rv);
1359 
1360 	rv = decrypt_tls_record(tc, en, seqno, outbuf, record_len, decrypted,
1361 	    len, &record_type);
1362 
1363 	ATF_REQUIRE_MSG((ssize_t)len == rv,
1364 	    "read %zd decrypted bytes, but wrote %zu", rv, len);
1365 	ATF_REQUIRE_INTEQ(type, record_type);
1366 
1367 	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);
1368 
1369 	free(outbuf);
1370 	free(decrypted);
1371 	free(plaintext);
1372 
1373 	close_sockets(sockets);
1374 }
1375 
1376 static void
1377 test_ktls_transmit_empty_fragment(const atf_tc_t *tc, struct tls_enable *en,
1378     uint64_t seqno)
1379 {
1380 	struct tls_record_layer *hdr;
1381 	char *outbuf;
1382 	size_t outbuf_cap, payload_len, record_len;
1383 	ssize_t rv;
1384 	int sockets[2];
1385 	uint8_t record_type;
1386 
1387 	outbuf_cap = tls_header_len(en) + tls_trailer_len(en);
1388 	outbuf = malloc(outbuf_cap);
1389 	hdr = (struct tls_record_layer *)outbuf;
1390 
1391 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1392 
1393 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
1394 	    sizeof(*en)) == 0);
1395 	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);
1396 
1397 	fd_set_blocking(sockets[0]);
1398 	fd_set_blocking(sockets[1]);
1399 
1400 	/*
1401 	 * A write of zero bytes should send an empty fragment only for
1402 	 * TLS 1.0, otherwise an error should be raised.
1403 	 */
1404 	rv = write(sockets[1], NULL, 0);
1405 	if (rv == 0) {
1406 		ATF_REQUIRE_INTEQ(CRYPTO_AES_CBC, en->cipher_algorithm);
1407 		ATF_REQUIRE_INTEQ(TLS_MINOR_VER_ZERO, en->tls_vminor);
1408 	} else {
1409 		ATF_REQUIRE_INTEQ(-1, rv);
1410 		ATF_REQUIRE_ERRNO(EINVAL, true);
1411 		goto out;
1412 	}
1413 
1414 	/*
1415 	 * First read the header to determine how much additional data
1416 	 * to read.
1417 	 */
1418 	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
1419 	ATF_REQUIRE_INTEQ(sizeof(struct tls_record_layer), rv);
1420 	payload_len = ntohs(hdr->tls_length);
1421 	record_len = payload_len + sizeof(struct tls_record_layer);
1422 	ATF_REQUIRE_MSG(record_len <= outbuf_cap,
1423 	    "record_len (%zu) > outbuf_cap (%zu)", record_len, outbuf_cap);
1424 	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
1425 	    payload_len);
1426 	ATF_REQUIRE_INTEQ((ssize_t)payload_len, rv);
1427 
1428 	rv = decrypt_tls_record(tc, en, seqno, outbuf, record_len, NULL, 0,
1429 	    &record_type);
1430 
1431 	ATF_REQUIRE_MSG(rv == 0,
1432 	    "read %zd decrypted bytes for an empty fragment", rv);
1433 	ATF_REQUIRE_INTEQ(TLS_RLTYPE_APP, record_type);
1434 
1435 out:
1436 	free(outbuf);
1437 
1438 	close_sockets(sockets);
1439 }
1440 
1441 static size_t
1442 ktls_receive_tls_record(struct tls_enable *en, int fd, uint8_t record_type,
1443     void *data, size_t len)
1444 {
1445 	struct msghdr msg;
1446 	struct cmsghdr *cmsg;
1447 	struct tls_get_record *tgr;
1448 	char cbuf[CMSG_SPACE(sizeof(*tgr))];
1449 	struct iovec iov;
1450 	ssize_t rv;
1451 
1452 	memset(&msg, 0, sizeof(msg));
1453 
1454 	msg.msg_control = cbuf;
1455 	msg.msg_controllen = sizeof(cbuf);
1456 
1457 	iov.iov_base = data;
1458 	iov.iov_len = len;
1459 	msg.msg_iov = &iov;
1460 	msg.msg_iovlen = 1;
1461 
1462 	ATF_REQUIRE((rv = recvmsg(fd, &msg, 0)) > 0);
1463 
1464 	ATF_REQUIRE((msg.msg_flags & (MSG_EOR | MSG_CTRUNC)) == MSG_EOR);
1465 
1466 	cmsg = CMSG_FIRSTHDR(&msg);
1467 	ATF_REQUIRE(cmsg != NULL);
1468 	ATF_REQUIRE_INTEQ(IPPROTO_TCP, cmsg->cmsg_level);
1469 	ATF_REQUIRE_INTEQ(TLS_GET_RECORD, cmsg->cmsg_type);
1470 	ATF_REQUIRE_INTEQ(CMSG_LEN(sizeof(*tgr)), cmsg->cmsg_len);
1471 
1472 	tgr = (struct tls_get_record *)CMSG_DATA(cmsg);
1473 	ATF_REQUIRE_INTEQ(record_type, tgr->tls_type);
1474 	ATF_REQUIRE_INTEQ(en->tls_vmajor, tgr->tls_vmajor);
1475 	/* XXX: Not sure if this is what OpenSSL expects? */
1476 	if (en->tls_vminor == TLS_MINOR_VER_THREE)
1477 		ATF_REQUIRE_INTEQ(TLS_MINOR_VER_TWO, tgr->tls_vminor);
1478 	else
1479 		ATF_REQUIRE_INTEQ(en->tls_vminor, tgr->tls_vminor);
1480 	ATF_REQUIRE_INTEQ(htons(rv), tgr->tls_length);
1481 
1482 	return (rv);
1483 }
1484 
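/*
 * Receive tests mirror the transmit tests: records are encrypted in
 * userland with the routines above and written to the peer, while the
 * socket under test has TLS receive offload (TCP_RXTLS_ENABLE) enabled;
 * each recvmsg() should then return decrypted payload along with a
 * TLS_GET_RECORD control message describing the record.
 */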
1485 static void
1486 test_ktls_receive_app_data(const atf_tc_t *tc, struct tls_enable *en,
1487     uint64_t seqno, size_t len, size_t padding)
1488 {
1489 	struct kevent ev;
1490 	char *plaintext, *received, *outbuf;
1491 	size_t outbuf_cap, outbuf_len, outbuf_sent, received_len, todo, written;
1492 	ssize_t rv;
1493 	int kq, sockets[2];
1494 
1495 	plaintext = alloc_buffer(len);
1496 	received = malloc(len);
1497 	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
1498 	    tls_trailer_len(en);
1499 	outbuf = malloc(outbuf_cap);
1500 
1501 	ATF_REQUIRE((kq = kqueue()) != -1);
1502 
1503 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1504 
1505 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1506 	    sizeof(*en)) == 0);
1507 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1508 
1509 	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
1510 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
1511 	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
1512 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
1513 
1514 	received_len = 0;
1515 	outbuf_len = 0;
1516 	written = 0;
1517 
1518 	while (received_len != len) {
1519 		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);
1520 
1521 		switch (ev.filter) {
1522 		case EVFILT_WRITE:
1523 			/*
1524 			 * Compose the next TLS record to send.
1525 			 */
1526 			if (outbuf_len == 0) {
1527 				ATF_REQUIRE(written < len);
1528 				todo = len - written;
1529 				if (todo > TLS_MAX_MSG_SIZE_V10_2 - padding)
1530 					todo = TLS_MAX_MSG_SIZE_V10_2 - padding;
1531 				outbuf_len = encrypt_tls_record(tc, en,
1532 				    TLS_RLTYPE_APP, seqno, plaintext + written,
1533 				    todo, outbuf, outbuf_cap, padding);
1534 				outbuf_sent = 0;
1535 				written += todo;
1536 				seqno++;
1537 			}
1538 
1539 			/*
1540 			 * Try to write the remainder of the current
1541 			 * TLS record.
1542 			 */
1543 			rv = write(ev.ident, outbuf + outbuf_sent,
1544 			    outbuf_len - outbuf_sent);
1545 			ATF_REQUIRE_MSG(rv > 0,
1546 			    "failed to write to socket: %s", strerror(errno));
1547 			outbuf_sent += rv;
1548 			if (outbuf_sent == outbuf_len) {
1549 				outbuf_len = 0;
1550 				if (written == len) {
1551 					ev.flags = EV_DISABLE;
1552 					ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
1553 					    NULL) == 0);
1554 				}
1555 			}
1556 			break;
1557 
1558 		case EVFILT_READ:
1559 			ATF_REQUIRE((ev.flags & EV_EOF) == 0);
1560 
1561 			rv = ktls_receive_tls_record(en, ev.ident,
1562 			    TLS_RLTYPE_APP, received + received_len,
1563 			    len - received_len);
1564 			received_len += rv;
1565 			break;
1566 		}
1567 	}
1568 
1569 	ATF_REQUIRE_MSG(written == received_len,
1570 	    "read %zu decrypted bytes, but wrote %zu", received_len, written);
1571 
1572 	ATF_REQUIRE(memcmp(plaintext, received, len) == 0);
1573 
1574 	free(outbuf);
1575 	free(received);
1576 	free(plaintext);
1577 
1578 	close_sockets(sockets);
1579 	ATF_REQUIRE(close(kq) == 0);
1580 }
1581 
1582 static void
1583 ktls_receive_tls_error(int fd, int expected_error)
1584 {
1585 	struct msghdr msg;
1586 	struct tls_get_record *tgr;
1587 	char cbuf[CMSG_SPACE(sizeof(*tgr))];
1588 	char buf[64];
1589 	struct iovec iov;
1590 
1591 	memset(&msg, 0, sizeof(msg));
1592 
1593 	msg.msg_control = cbuf;
1594 	msg.msg_controllen = sizeof(cbuf);
1595 
1596 	iov.iov_base = buf;
1597 	iov.iov_len = sizeof(buf);
1598 	msg.msg_iov = &iov;
1599 	msg.msg_iovlen = 1;
1600 
1601 	ATF_REQUIRE(recvmsg(fd, &msg, 0) == -1);
1602 	if (expected_error != 0)
1603 		ATF_REQUIRE_ERRNO(expected_error, true);
1604 }
1605 
1606 static void
1607 test_ktls_receive_corrupted_record(const atf_tc_t *tc, struct tls_enable *en,
1608     uint64_t seqno, size_t len, ssize_t offset)
1609 {
1610 	char *plaintext, *outbuf;
1611 	size_t outbuf_cap, outbuf_len;
1612 	ssize_t rv;
1613 	int sockets[2];
1614 
1615 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1616 
1617 	plaintext = alloc_buffer(len);
1618 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1619 	outbuf = malloc(outbuf_cap);
1620 
1621 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1622 
1623 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1624 	    sizeof(*en)) == 0);
1625 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1626 
1627 	fd_set_blocking(sockets[0]);
1628 	fd_set_blocking(sockets[1]);
1629 
1630 	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
1631 	    plaintext, len, outbuf, outbuf_cap, 0);
1632 
1633 	/* A negative offset is an offset from the end. */
1634 	if (offset < 0)
1635 		offset += outbuf_len;
1636 	outbuf[offset] ^= 0x01;
1637 
1638 	rv = write(sockets[1], outbuf, outbuf_len);
1639 	ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv);
1640 
1641 	ktls_receive_tls_error(sockets[0], EBADMSG);
1642 
1643 	free(outbuf);
1644 	free(plaintext);
1645 
1646 	close_sockets(sockets);
1647 }
1648 
1649 static void
1650 test_ktls_receive_corrupted_iv(const atf_tc_t *tc, struct tls_enable *en,
1651     uint64_t seqno, size_t len)
1652 {
1653 	ATF_REQUIRE(tls_header_len(en) > sizeof(struct tls_record_layer));
1654 
1655 	/* Corrupt the first byte of the explicit IV after the header. */
1656 	test_ktls_receive_corrupted_record(tc, en, seqno, len,
1657 	    sizeof(struct tls_record_layer));
1658 }
1659 
1660 static void
1661 test_ktls_receive_corrupted_data(const atf_tc_t *tc, struct tls_enable *en,
1662     uint64_t seqno, size_t len)
1663 {
1664 	ATF_REQUIRE(len > 0);
1665 
1666 	/* Corrupt the first ciphertext byte after the header. */
1667 	test_ktls_receive_corrupted_record(tc, en, seqno, len,
1668 	    tls_header_len(en));
1669 }
1670 
1671 static void
1672 test_ktls_receive_corrupted_mac(const atf_tc_t *tc, struct tls_enable *en,
1673     uint64_t seqno, size_t len)
1674 {
1675 	ssize_t offset;
1676 
1677 	/* Corrupt the first byte of the MAC. */
1678 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
1679 		offset = tls_header_len(en) + len;
1680 	else
1681 		offset = -tls_mac_len(en);
1682 	test_ktls_receive_corrupted_record(tc, en, seqno, len, offset);
1683 }
1684 
1685 static void
1686 test_ktls_receive_corrupted_padding(const atf_tc_t *tc, struct tls_enable *en,
1687     uint64_t seqno, size_t len)
1688 {
1689 	ATF_REQUIRE_INTEQ(CRYPTO_AES_CBC, en->cipher_algorithm);
1690 
1691 	/* Corrupt the last byte of the padding. */
1692 	test_ktls_receive_corrupted_record(tc, en, seqno, len, -1);
1693 }
1694 
1695 static void
1696 test_ktls_receive_truncated_record(const atf_tc_t *tc, struct tls_enable *en,
1697     uint64_t seqno, size_t len)
1698 {
1699 	char *plaintext, *outbuf;
1700 	size_t outbuf_cap, outbuf_len;
1701 	ssize_t rv;
1702 	int sockets[2];
1703 
1704 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1705 
1706 	plaintext = alloc_buffer(len);
1707 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1708 	outbuf = malloc(outbuf_cap);
1709 
1710 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1711 
1712 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1713 	    sizeof(*en)) == 0);
1714 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1715 
1716 	fd_set_blocking(sockets[0]);
1717 	fd_set_blocking(sockets[1]);
1718 
1719 	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
1720 	    plaintext, len, outbuf, outbuf_cap, 0);
1721 
1722 	rv = write(sockets[1], outbuf, outbuf_len / 2);
1723 	ATF_REQUIRE_INTEQ((ssize_t)(outbuf_len / 2), rv);
1724 
1725 	ATF_REQUIRE(shutdown(sockets[1], SHUT_WR) == 0);
1726 
1727 	ktls_receive_tls_error(sockets[0], EMSGSIZE);
1728 
1729 	free(outbuf);
1730 	free(plaintext);
1731 
1732 	close_sockets(sockets);
1733 }
1734 
1735 static void
1736 test_ktls_receive_bad_major(const atf_tc_t *tc, struct tls_enable *en,
1737     uint64_t seqno, size_t len)
1738 {
1739 	struct tls_record_layer *hdr;
1740 	char *plaintext, *outbuf;
1741 	size_t outbuf_cap, outbuf_len;
1742 	ssize_t rv;
1743 	int sockets[2];
1744 
1745 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1746 
1747 	plaintext = alloc_buffer(len);
1748 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1749 	outbuf = malloc(outbuf_cap);
1750 
1751 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1752 
1753 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1754 	    sizeof(*en)) == 0);
1755 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1756 
1757 	fd_set_blocking(sockets[0]);
1758 	fd_set_blocking(sockets[1]);
1759 
1760 	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
1761 	    plaintext, len, outbuf, outbuf_cap, 0);
1762 
1763 	hdr = (void *)outbuf;
1764 	hdr->tls_vmajor++;
1765 
1766 	rv = write(sockets[1], outbuf, outbuf_len);
1767 	ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv);
1768 
1769 	ktls_receive_tls_error(sockets[0], EINVAL);
1770 
1771 	free(outbuf);
1772 	free(plaintext);
1773 
1774 	close_sockets(sockets);
1775 }
1776 
1777 static void
1778 test_ktls_receive_bad_minor(const atf_tc_t *tc, struct tls_enable *en,
1779     uint64_t seqno, size_t len)
1780 {
1781 	struct tls_record_layer *hdr;
1782 	char *plaintext, *outbuf;
1783 	size_t outbuf_cap, outbuf_len;
1784 	ssize_t rv;
1785 	int sockets[2];
1786 
1787 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1788 
1789 	plaintext = alloc_buffer(len);
1790 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1791 	outbuf = malloc(outbuf_cap);
1792 
1793 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1794 
1795 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1796 	    sizeof(*en)) == 0);
1797 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1798 
1799 	fd_set_blocking(sockets[0]);
1800 	fd_set_blocking(sockets[1]);
1801 
1802 	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
1803 	    plaintext, len, outbuf, outbuf_cap, 0);
1804 
1805 	hdr = (void *)outbuf;
1806 	hdr->tls_vminor++;
1807 
1808 	rv = write(sockets[1], outbuf, outbuf_len);
1809 	ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv);
1810 
1811 	ktls_receive_tls_error(sockets[0], EINVAL);
1812 
1813 	free(outbuf);
1814 	free(plaintext);
1815 
1816 	close_sockets(sockets);
1817 }
1818 
1819 static void
1820 test_ktls_receive_bad_type(const atf_tc_t *tc, struct tls_enable *en,
1821     uint64_t seqno, size_t len)
1822 {
1823 	struct tls_record_layer *hdr;
1824 	char *plaintext, *outbuf;
1825 	size_t outbuf_cap, outbuf_len;
1826 	ssize_t rv;
1827 	int sockets[2];
1828 
1829 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1830 	ATF_REQUIRE_INTEQ(TLS_MINOR_VER_THREE, en->tls_vminor);
1831 
1832 	plaintext = alloc_buffer(len);
1833 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1834 	outbuf = malloc(outbuf_cap);
1835 
1836 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1837 
1838 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1839 	    sizeof(*en)) == 0);
1840 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1841 
1842 	fd_set_blocking(sockets[0]);
1843 	fd_set_blocking(sockets[1]);
1844 
1845 	outbuf_len = encrypt_tls_record(tc, en, 0x21 /* Alert */, seqno,
1846 	    plaintext, len, outbuf, outbuf_cap, 0);
1847 
1848 	hdr = (void *)outbuf;
1849 	hdr->tls_type = TLS_RLTYPE_APP + 1;
1850 
1851 	rv = write(sockets[1], outbuf, outbuf_len);
1852 	ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv);
1853 
1854 	ktls_receive_tls_error(sockets[0], EINVAL);
1855 
1856 	free(outbuf);
1857 	free(plaintext);
1858 
1859 	close_sockets(sockets);
1860 }
1861 
1862 static void
1863 test_ktls_receive_bad_size(const atf_tc_t *tc, struct tls_enable *en,
1864     uint64_t seqno, size_t len)
1865 {
1866 	struct tls_record_layer *hdr;
1867 	char *outbuf;
1868 	size_t outbuf_len;
1869 	ssize_t rv;
1870 	int sockets[2];
1871 
1872 	outbuf_len = sizeof(*hdr) + len;
1873 	outbuf = calloc(1, outbuf_len);
1874 
1875 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1876 
1877 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1878 	    sizeof(*en)) == 0);
1879 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1880 
1881 	fd_set_blocking(sockets[0]);
1882 	fd_set_blocking(sockets[1]);
1883 
1884 	hdr = (void *)outbuf;
1885 	hdr->tls_vmajor = en->tls_vmajor;
1886 	if (en->tls_vminor == TLS_MINOR_VER_THREE)
1887 		hdr->tls_vminor = TLS_MINOR_VER_TWO;
1888 	else
1889 		hdr->tls_vminor = en->tls_vminor;
1890 	hdr->tls_type = TLS_RLTYPE_APP;
1891 	hdr->tls_length = htons(len);
1892 
1893 	rv = write(sockets[1], outbuf, outbuf_len);
1894 	ATF_REQUIRE_INTEQ((ssize_t)outbuf_len, rv);
1895 
1896 	ATF_REQUIRE(shutdown(sockets[1], SHUT_WR) == 0);
1897 
1898 	ktls_receive_tls_error(sockets[0], EMSGSIZE);
1899 
1900 	free(outbuf);
1901 
1902 	close_sockets(sockets);
1903 }
1904 
1905 #define	TLS_10_TESTS(M)							\
1906 	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8,			\
1907 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO)			\
1908 	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8,			\
1909 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO)
1910 
1911 #define	TLS_13_TESTS(M)							\
1912 	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
1913 	    TLS_MINOR_VER_THREE)					\
1914 	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
1915 	    TLS_MINOR_VER_THREE)					\
1916 	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
1917 	    TLS_MINOR_VER_THREE)
1918 
1919 #define	AES_CBC_NONZERO_TESTS(M)					\
1920 	M(aes128_cbc_1_1_sha1, CRYPTO_AES_CBC, 128 / 8,			\
1921 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE)			\
1922 	M(aes256_cbc_1_1_sha1, CRYPTO_AES_CBC, 256 / 8,			\
1923 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE)			\
1924 	M(aes128_cbc_1_2_sha1, CRYPTO_AES_CBC, 128 / 8,			\
1925 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO)			\
1926 	M(aes256_cbc_1_2_sha1, CRYPTO_AES_CBC, 256 / 8,			\
1927 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO)			\
1928 	M(aes128_cbc_1_2_sha256, CRYPTO_AES_CBC, 128 / 8,		\
1929 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO)			\
1930 	M(aes256_cbc_1_2_sha256, CRYPTO_AES_CBC, 256 / 8,		\
1931 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO)			\
1932 	M(aes128_cbc_1_2_sha384, CRYPTO_AES_CBC, 128 / 8,		\
1933 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO)			\
1934 	M(aes256_cbc_1_2_sha384, CRYPTO_AES_CBC, 256 / 8,		\
1935 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO)			\
1936 
1937 #define	AES_CBC_TESTS(M)						\
1938 	TLS_10_TESTS(M)							\
1939 	AES_CBC_NONZERO_TESTS(M)
1940 
1941 #define AES_GCM_12_TESTS(M)						\
1942 	M(aes128_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
1943 	    TLS_MINOR_VER_TWO)						\
1944 	M(aes256_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
1945 	    TLS_MINOR_VER_TWO)
1946 
1947 #define AES_GCM_TESTS(M)						\
1948 	AES_GCM_12_TESTS(M)						\
1949 	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
1950 	    TLS_MINOR_VER_THREE)					\
1951 	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
1952 	    TLS_MINOR_VER_THREE)
1953 
1954 #define CHACHA20_TESTS(M)						\
1955 	M(chacha20_poly1305_1_2, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
1956 	    TLS_MINOR_VER_TWO)						\
1957 	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
1958 	    TLS_MINOR_VER_THREE)
1959 
1960 #define GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1961 	    auth_alg, minor, name, len)					\
1962 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name);		\
1963 ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc)			\
1964 {									\
1965 	struct tls_enable en;						\
1966 	uint64_t seqno;							\
1967 									\
1968 	ATF_REQUIRE_KTLS();						\
1969 	seqno = random();						\
1970 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
1971 	    seqno, &en);						\
1972 	test_ktls_transmit_app_data(tc, &en, seqno, len);		\
1973 	free_tls_enable(&en);						\
1974 }
1975 
1976 #define ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1977 	    auth_alg, minor, name)					\
1978 	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);
1979 
1980 #define GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1981 	    auth_alg, minor, name, type, len)				\
1982 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name);		\
1983 ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc)			\
1984 {									\
1985 	struct tls_enable en;						\
1986 	uint64_t seqno;							\
1987 									\
1988 	ATF_REQUIRE_KTLS();						\
1989 	seqno = random();						\
1990 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
1991 	    seqno, &en);						\
1992 	test_ktls_transmit_control(tc, &en, seqno, type, len);		\
1993 	free_tls_enable(&en);						\
1994 }
1995 
1996 #define ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1997 	    auth_alg, minor, name)					\
1998 	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);
1999 
2000 #define GEN_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg,	\
2001 	    key_size, auth_alg, minor)					\
2002 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_empty_fragment);	\
2003 ATF_TC_BODY(ktls_transmit_##cipher_name##_empty_fragment, tc)		\
2004 {									\
2005 	struct tls_enable en;						\
2006 	uint64_t seqno;							\
2007 									\
2008 	ATF_REQUIRE_KTLS();						\
2009 	seqno = random();						\
2010 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2011 	    seqno, &en);						\
2012 	test_ktls_transmit_empty_fragment(tc, &en, seqno);		\
2013 	free_tls_enable(&en);						\
2014 }
2015 
2016 #define ADD_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg,	\
2017 	    key_size, auth_alg, minor)					\
2018 	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_empty_fragment);
2019 
2020 #define GEN_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
2021 	    minor)							\
2022 	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2023 	    auth_alg, minor, short, 64)					\
2024 	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2025 	    auth_alg, minor, long, 64 * 1024)				\
2026 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2027 	    auth_alg, minor, control, 0x21 /* Alert */, 32)
2028 
2029 #define ADD_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
2030 	    minor)							\
2031 	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2032 	    auth_alg, minor, short)					\
2033 	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2034 	    auth_alg, minor, long)					\
2035 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2036 	    auth_alg, minor, control)
2037 
2038 /*
2039  * For each supported cipher suite, run three transmit tests:
2040  *
2041  * - a short test which sends 64 bytes of application data (likely as
2042  *   a single TLS record)
2043  *
2044  * - a long test which sends 64KB of application data (split across
2045  *   multiple TLS records)
2046  *
2047  * - a control test which sends a single record with a specific
2048  *   content type via sendmsg()
2049  */
2050 AES_CBC_TESTS(GEN_TRANSMIT_TESTS);
2051 AES_GCM_TESTS(GEN_TRANSMIT_TESTS);
2052 CHACHA20_TESTS(GEN_TRANSMIT_TESTS);
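/*
 * For reference, each M(...) entry expands through the GEN_* macros above
 * into an ordinary ATF test case.  Roughly, the aes128_gcm_1_2 "short"
 * transmit test expands to:
 *
 *	ATF_TC_WITHOUT_HEAD(ktls_transmit_aes128_gcm_1_2_short);
 *	ATF_TC_BODY(ktls_transmit_aes128_gcm_1_2_short, tc)
 *	{
 *		struct tls_enable en;
 *		uint64_t seqno;
 *
 *		ATF_REQUIRE_KTLS();
 *		seqno = random();
 *		build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,
 *		    TLS_MINOR_VER_TWO, seqno, &en);
 *		test_ktls_transmit_app_data(tc, &en, seqno, 64);
 *		free_tls_enable(&en);
 *	}
 */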
2053 
2054 #define GEN_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size,	\
2055 	    auth_alg, minor)						\
2056 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2057 	    auth_alg, minor, padding_1, 0x21 /* Alert */, 1)		\
2058 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2059 	    auth_alg, minor, padding_2, 0x21 /* Alert */, 2)		\
2060 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2061 	    auth_alg, minor, padding_3, 0x21 /* Alert */, 3)		\
2062 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2063 	    auth_alg, minor, padding_4, 0x21 /* Alert */, 4)		\
2064 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2065 	    auth_alg, minor, padding_5, 0x21 /* Alert */, 5)		\
2066 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2067 	    auth_alg, minor, padding_6, 0x21 /* Alert */, 6)		\
2068 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2069 	    auth_alg, minor, padding_7, 0x21 /* Alert */, 7)		\
2070 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2071 	    auth_alg, minor, padding_8, 0x21 /* Alert */, 8)		\
2072 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2073 	    auth_alg, minor, padding_9, 0x21 /* Alert */, 9)		\
2074 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2075 	    auth_alg, minor, padding_10, 0x21 /* Alert */, 10)		\
2076 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2077 	    auth_alg, minor, padding_11, 0x21 /* Alert */, 11)		\
2078 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2079 	    auth_alg, minor, padding_12, 0x21 /* Alert */, 12)		\
2080 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2081 	    auth_alg, minor, padding_13, 0x21 /* Alert */, 13)		\
2082 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2083 	    auth_alg, minor, padding_14, 0x21 /* Alert */, 14)		\
2084 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2085 	    auth_alg, minor, padding_15, 0x21 /* Alert */, 15)		\
2086 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2087 	    auth_alg, minor, padding_16, 0x21 /* Alert */, 16)
2088 
2089 #define ADD_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size,	\
2090 	    auth_alg, minor)						\
2091 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2092 	    auth_alg, minor, padding_1)					\
2093 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2094 	    auth_alg, minor, padding_2)					\
2095 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2096 	    auth_alg, minor, padding_3)					\
2097 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2098 	    auth_alg, minor, padding_4)					\
2099 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2100 	    auth_alg, minor, padding_5)					\
2101 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2102 	    auth_alg, minor, padding_6)					\
2103 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2104 	    auth_alg, minor, padding_7)					\
2105 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2106 	    auth_alg, minor, padding_8)					\
2107 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2108 	    auth_alg, minor, padding_9)					\
2109 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2110 	    auth_alg, minor, padding_10)				\
2111 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2112 	    auth_alg, minor, padding_11)				\
2113 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2114 	    auth_alg, minor, padding_12)				\
2115 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2116 	    auth_alg, minor, padding_13)				\
2117 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2118 	    auth_alg, minor, padding_14)				\
2119 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2120 	    auth_alg, minor, padding_15)				\
2121 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2122 	    auth_alg, minor, padding_16)
2123 
2124 /*
2125  * For AES-CBC MTE cipher suites using padding, add tests of messages
2126  * with each possible padding size.  Note that the padding_<N> tests
2127  * do not necessarily test <N> bytes of padding as the padding is a
2128  * function of the cipher suite's MAC length.  However, cycling
2129  * through all of the payload sizes from 1 to 16 should exercise all
2130  * of the possible padding lengths for each suite.
2131  */
2132 AES_CBC_TESTS(GEN_TRANSMIT_PADDING_TESTS);
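/*
 * As a rough illustration of the padding arithmetic (assuming the usual
 * MAC-then-encrypt record layout): with a 20-byte SHA1 MAC, a 1-byte
 * payload gives 1 + 20 = 21 bytes of data, which is padded up to the
 * next multiple of the 16-byte AES block (32), i.e. 11 bytes of padding
 * including the padding-length byte; an 11-byte payload needs only the
 * single mandatory padding-length byte.
 */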
2133 
2134 /*
2135  * Test "empty fragments" which are TLS records with no payload that
2136  * OpenSSL can send for TLS 1.0 connections.
2137  */
2138 AES_CBC_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
2139 AES_GCM_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
2140 CHACHA20_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
2141 
2142 static void
2143 test_ktls_invalid_transmit_cipher_suite(const atf_tc_t *tc,
2144     struct tls_enable *en)
2145 {
2146 	int sockets[2];
2147 
2148 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
2149 
2150 	ATF_REQUIRE_ERRNO(EINVAL, setsockopt(sockets[1], IPPROTO_TCP,
2151 	    TCP_TXTLS_ENABLE, en, sizeof(*en)) == -1);
2152 
2153 	close_sockets(sockets);
2154 }
2155 
2156 #define GEN_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg,	\
2157 	    minor)							\
2158 ATF_TC_WITHOUT_HEAD(ktls_transmit_invalid_##name);			\
2159 ATF_TC_BODY(ktls_transmit_invalid_##name, tc)				\
2160 {									\
2161 	struct tls_enable en;						\
2162 	uint64_t seqno;							\
2163 									\
2164 	ATF_REQUIRE_KTLS();						\
2165 	seqno = random();						\
2166 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2167 	    seqno, &en);						\
2168 	test_ktls_invalid_transmit_cipher_suite(tc, &en);		\
2169 	free_tls_enable(&en);						\
2170 }
2171 
2172 #define ADD_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \
2173 	    minor)							\
2174 	ATF_TP_ADD_TC(tp, ktls_transmit_invalid_##name);
2175 
2176 #define	INVALID_CIPHER_SUITES(M)					\
2177 	M(aes128_cbc_1_0_sha256, CRYPTO_AES_CBC, 128 / 8,		\
2178 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ZERO)			\
2179 	M(aes128_cbc_1_0_sha384, CRYPTO_AES_CBC, 128 / 8,		\
2180 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ZERO)			\
2181 	M(aes128_gcm_1_0, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
2182 	    TLS_MINOR_VER_ZERO)						\
2183 	M(chacha20_poly1305_1_0, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
2184 	    TLS_MINOR_VER_ZERO)						\
2185 	M(aes128_cbc_1_1_sha256, CRYPTO_AES_CBC, 128 / 8,		\
2186 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ONE)			\
2187 	M(aes128_cbc_1_1_sha384, CRYPTO_AES_CBC, 128 / 8,		\
2188 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ONE)			\
2189 	M(aes128_gcm_1_1, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
2190 	    TLS_MINOR_VER_ONE)						\
2191 	M(chacha20_poly1305_1_1, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
2192 	    TLS_MINOR_VER_ONE)						\
2193 	M(aes128_cbc_1_3_sha1, CRYPTO_AES_CBC, 128 / 8,			\
2194 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_THREE)			\
2195 	M(aes128_cbc_1_3_sha256, CRYPTO_AES_CBC, 128 / 8,		\
2196 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_THREE)			\
2197 	M(aes128_cbc_1_3_sha384, CRYPTO_AES_CBC, 128 / 8,		\
2198 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_THREE)
2199 
2200 /*
2201  * Ensure that invalid cipher suites are rejected for transmit.
2202  */
2203 INVALID_CIPHER_SUITES(GEN_INVALID_TRANSMIT_TEST);
2204 
2205 #define GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2206 	    auth_alg, minor, name, len, padding)			\
2207 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name);		\
2208 ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc)			\
2209 {									\
2210 	struct tls_enable en;						\
2211 	uint64_t seqno;							\
2212 									\
2213 	ATF_REQUIRE_KTLS();						\
2214 	seqno = random();						\
2215 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2216 	    seqno, &en);						\
2217 	test_ktls_receive_app_data(tc, &en, seqno, len, padding);	\
2218 	free_tls_enable(&en);						\
2219 }
2220 
2221 #define ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2222 	    auth_alg, minor, name)					\
2223 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name);
2224 
2225 #define GEN_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2226 	    auth_alg, minor, len)					\
2227 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_data);		\
2228 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_data, tc)			\
2229 {									\
2230 	struct tls_enable en;						\
2231 	uint64_t seqno;							\
2232 									\
2233 	ATF_REQUIRE_KTLS();						\
2234 	seqno = random();						\
2235 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2236 	    seqno, &en);						\
2237 	test_ktls_receive_corrupted_data(tc, &en, seqno, len);		\
2238 	free_tls_enable(&en);						\
2239 }
2240 
2241 #define ADD_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2242 	    auth_alg, minor)						\
2243 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_data);
2244 
2245 #define GEN_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size,	\
2246 	    auth_alg, minor, len)					\
2247 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_mac);		\
2248 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_mac, tc)			\
2249 {									\
2250 	struct tls_enable en;						\
2251 	uint64_t seqno;							\
2252 									\
2253 	ATF_REQUIRE_KTLS();						\
2254 	seqno = random();						\
2255 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2256 	    seqno, &en);						\
2257 	test_ktls_receive_corrupted_mac(tc, &en, seqno, len);		\
2258 	free_tls_enable(&en);						\
2259 }
2260 
2261 #define ADD_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size,	\
2262 	    auth_alg, minor)						\
2263 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_mac);
2264 
2265 #define GEN_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size,	\
2266 	    auth_alg, minor, len)					\
2267 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_truncated_record);	\
2268 ATF_TC_BODY(ktls_receive_##cipher_name##_truncated_record, tc)		\
2269 {									\
2270 	struct tls_enable en;						\
2271 	uint64_t seqno;							\
2272 									\
2273 	ATF_REQUIRE_KTLS();						\
2274 	seqno = random();						\
2275 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2276 	    seqno, &en);						\
2277 	test_ktls_receive_truncated_record(tc, &en, seqno, len);	\
2278 	free_tls_enable(&en);						\
2279 }
2280 
2281 #define ADD_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size,	\
2282 	    auth_alg, minor)						\
2283 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_truncated_record);
2284 
2285 #define GEN_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size,	\
2286 	    auth_alg, minor, len)					\
2287 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_major);		\
2288 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_major, tc)			\
2289 {									\
2290 	struct tls_enable en;						\
2291 	uint64_t seqno;							\
2292 									\
2293 	ATF_REQUIRE_KTLS();						\
2294 	seqno = random();						\
2295 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2296 	    seqno, &en);						\
2297 	test_ktls_receive_bad_major(tc, &en, seqno, len);		\
2298 	free_tls_enable(&en);						\
2299 }
2300 
2301 #define ADD_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size,	\
2302 	    auth_alg, minor)						\
2303 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_major);
2304 
2305 #define GEN_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size,	\
2306 	    auth_alg, minor, len)					\
2307 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_minor);		\
2308 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_minor, tc)			\
2309 {									\
2310 	struct tls_enable en;						\
2311 	uint64_t seqno;							\
2312 									\
2313 	ATF_REQUIRE_KTLS();						\
2314 	seqno = random();						\
2315 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2316 	    seqno, &en);						\
2317 	test_ktls_receive_bad_minor(tc, &en, seqno, len);		\
2318 	free_tls_enable(&en);						\
2319 }
2320 
2321 #define ADD_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size,	\
2322 	    auth_alg, minor)						\
2323 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_minor);
2324 
2325 #define GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2326 	    auth_alg, minor, name, len)					\
2327 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name);		\
2328 ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc)			\
2329 {									\
2330 	struct tls_enable en;						\
2331 	uint64_t seqno;							\
2332 									\
2333 	ATF_REQUIRE_KTLS();						\
2334 	seqno = random();						\
2335 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2336 	    seqno, &en);						\
2337 	test_ktls_receive_bad_size(tc, &en, seqno, (len));		\
2338 	free_tls_enable(&en);						\
2339 }
2340 
2341 #define ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2342 	    auth_alg, minor, name)					\
2343 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name);
2344 
2345 #define GEN_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
2346 	    minor)							\
2347 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2348 	    auth_alg, minor, short, 64, 0)				\
2349 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2350 	    auth_alg, minor, long, 64 * 1024, 0)			\
2351 	GEN_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2352 	    auth_alg, minor, 64)					\
2353 	GEN_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size,	\
2354 	    auth_alg, minor, 64)					\
2355 	GEN_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size,	\
2356 	    auth_alg, minor, 64)					\
2357 	GEN_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size,	\
2358 	    auth_alg, minor, 64)					\
2359 	GEN_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size,	\
2360 	    auth_alg, minor, 64)					\
2361 	GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2362 	    auth_alg, minor, small_record,				\
2363 	    tls_minimum_record_payload(&en) - 1)			\
2364 	GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2365 	    auth_alg, minor, oversized_record,				\
2366 	    TLS_MAX_MSG_SIZE_V10_2 * 2)
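/*
 * Note that the "len" expressions above which mention "en" (e.g.
 * tls_minimum_record_payload(&en) - 1) are substituted textually into
 * GEN_RECEIVE_BAD_SIZE_TEST, so they are evaluated inside the generated
 * test body against that test's own populated tls_enable, roughly:
 *
 *	test_ktls_receive_bad_size(tc, &en, seqno,
 *	    (tls_minimum_record_payload(&en) - 1));
 */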
2367 
2368 #define ADD_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
2369 	    minor)							\
2370 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2371 	    auth_alg, minor, short)					\
2372 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2373 	    auth_alg, minor, long)					\
2374 	ADD_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2375 	    auth_alg, minor)						\
2376 	ADD_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size,	\
2377 	    auth_alg, minor)						\
2378 	ADD_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size,	\
2379 	    auth_alg, minor)						\
2380 	ADD_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size,	\
2381 	    auth_alg, minor)						\
2382 	ADD_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size,	\
2383 	    auth_alg, minor)						\
2384 	ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2385 	    auth_alg, minor, small_record)				\
2386 	ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2387 	    auth_alg, minor, oversized_record)
2388 
2389 /*
2390  * For each supported cipher suite, run several receive tests:
2391  *
2392  * - a short test which sends 64 bytes of application data (likely as
2393  *   a single TLS record)
2394  *
2395  * - a long test which sends 64KB of application data (split across
2396  *   multiple TLS records)
2397  *
2398  * - a test with corrupted payload data in a single TLS record
2399  *
2400  * - a test with a corrupted MAC in a single TLS record
2401  *
2402  * - a test with a truncated TLS record
2403  *
2404  * - tests with invalid TLS major and minor versions
2405  *
2406  * - a test with a record whose payload size is one less than the
2407  *   smallest valid size
2408  *
2409  * - a test with an oversized TLS record
2410  */
2411 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_TESTS);
2412 AES_GCM_TESTS(GEN_RECEIVE_TESTS);
2413 CHACHA20_TESTS(GEN_RECEIVE_TESTS);
2414 
2415 #define	GEN_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg,		\
2416 	    key_size, auth_alg, minor)					\
2417 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2418 	    auth_alg, minor, padding_1, 1, 0)				\
2419 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2420 	    auth_alg, minor, padding_2, 2, 0)				\
2421 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2422 	    auth_alg, minor, padding_3, 3, 0)				\
2423 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2424 	    auth_alg, minor, padding_4, 4, 0)				\
2425 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2426 	    auth_alg, minor, padding_5, 5, 0)				\
2427 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2428 	    auth_alg, minor, padding_6, 6, 0)				\
2429 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2430 	    auth_alg, minor, padding_7, 7, 0)				\
2431 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2432 	    auth_alg, minor, padding_8, 8, 0)				\
2433 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2434 	    auth_alg, minor, padding_9, 9, 0)				\
2435 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2436 	    auth_alg, minor, padding_10, 10, 0)				\
2437 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2438 	    auth_alg, minor, padding_11, 11, 0)				\
2439 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2440 	    auth_alg, minor, padding_12, 12, 0)				\
2441 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2442 	    auth_alg, minor, padding_13, 13, 0)				\
2443 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2444 	    auth_alg, minor, padding_14, 14, 0)				\
2445 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2446 	    auth_alg, minor, padding_15, 15, 0)				\
2447 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2448 	    auth_alg, minor, padding_16, 16, 0)				\
2449 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2450 	    auth_alg, minor, padding_16_extra, 16, 16)			\
2451 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2452 	    auth_alg, minor, padding_32_extra, 16, 32)
2453 
2454 #define ADD_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg,		\
2455 	    key_size, auth_alg, minor)					\
2456 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2457 	    auth_alg, minor, padding_1)					\
2458 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2459 	    auth_alg, minor, padding_2)					\
2460 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2461 	    auth_alg, minor, padding_3)					\
2462 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2463 	    auth_alg, minor, padding_4)					\
2464 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2465 	    auth_alg, minor, padding_5)					\
2466 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2467 	    auth_alg, minor, padding_6)					\
2468 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2469 	    auth_alg, minor, padding_7)					\
2470 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2471 	    auth_alg, minor, padding_8)					\
2472 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2473 	    auth_alg, minor, padding_9)					\
2474 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2475 	    auth_alg, minor, padding_10)				\
2476 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2477 	    auth_alg, minor, padding_11)				\
2478 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2479 	    auth_alg, minor, padding_12)				\
2480 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2481 	    auth_alg, minor, padding_13)				\
2482 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2483 	    auth_alg, minor, padding_14)				\
2484 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2485 	    auth_alg, minor, padding_15)				\
2486 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2487 	    auth_alg, minor, padding_16)				\
2488 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2489 	    auth_alg, minor, padding_16_extra)				\
2490 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2491 	    auth_alg, minor, padding_32_extra)
2492 
2493 #define GEN_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size,	\
2494 	    auth_alg, minor, len)					\
2495 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_padding);		\
2496 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_padding, tc)		\
2497 {									\
2498 	struct tls_enable en;						\
2499 	uint64_t seqno;							\
2500 									\
2501 	ATF_REQUIRE_KTLS();						\
2502 	seqno = random();						\
2503 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2504 	    seqno, &en);						\
2505 	test_ktls_receive_corrupted_padding(tc, &en, seqno, len);	\
2506 	free_tls_enable(&en);						\
2507 }
2508 
2509 #define ADD_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size,	\
2510 	    auth_alg, minor)						\
2511 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_padding);
2512 
2513 #define	GEN_RECEIVE_MTE_TESTS(cipher_name, cipher_alg, key_size,	\
2514 	    auth_alg, minor)						\
2515 	GEN_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg,		\
2516 	    key_size, auth_alg, minor)					\
2517 	GEN_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size,	\
2518 	    auth_alg, minor, 64)					\
2519 	GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2520 	    auth_alg, minor, non_block_size,				\
2521 	    tls_minimum_record_payload(&en) + 1)
2522 
2523 #define	ADD_RECEIVE_MTE_TESTS(cipher_name, cipher_alg, key_size,	\
2524 	    auth_alg, minor)						\
2525 	ADD_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg,		\
2526 	    key_size, auth_alg, minor)					\
2527 	ADD_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size,	\
2528 	    auth_alg, minor)						\
2529 	ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2530 	    auth_alg, minor, non_block_size)
2531 
2532 /*
2533  * For AES-CBC MTE cipher suites using padding, add tests of messages
2534  * with each possible padding size.  Note that the padding_<N> tests
2535  * do not necessarily test <N> bytes of padding as the padding is a
2536  * function of the cipher suite's MAC length.  However, cycling
2537  * through all of the payload sizes from 1 to 16 should exercise all
2538  * of the possible padding lengths for each suite.
2539  *
2540  * Two additional tests add an extra 16 or 32 bytes of padding
2541  * beyond the normal padding.
2542  *
2543  * Another test checks for corrupted padding.
2544  *
2545  * Another test checks for a record whose payload is not a multiple of
2546  * the AES block size.
2547  */
2548 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_MTE_TESTS);
2549 
2550 #define GEN_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size,	\
2551 	    auth_alg, minor)						\
2552 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_iv);		\
2553 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_iv, tc)			\
2554 {									\
2555 	struct tls_enable en;						\
2556 	uint64_t seqno;							\
2557 									\
2558 	ATF_REQUIRE_KTLS();						\
2559 	seqno = random();						\
2560 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2561 	    seqno, &en);						\
2562 	test_ktls_receive_corrupted_iv(tc, &en, seqno, 64);		\
2563 	free_tls_enable(&en);						\
2564 }
2565 
2566 #define ADD_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size,	\
2567 	    auth_alg, minor)						\
2568 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_iv);
2569 
2570 #define	GEN_RECEIVE_EXPLICIT_IV_TESTS(cipher_name, cipher_alg,		\
2571 	    key_size, auth_alg, minor)					\
2572 	GEN_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size,	\
2573 	    auth_alg, minor)						\
2574 	GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2575 	    auth_alg, minor, short_header,				\
2576 	    sizeof(struct tls_record_layer) + 1)
2577 
2578 #define	ADD_RECEIVE_EXPLICIT_IV_TESTS(cipher_name, cipher_alg,		\
2579 	    key_size, auth_alg, minor)					\
2580 	ADD_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size,	\
2581 	    auth_alg, minor)						\
2582 	ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2583 	    auth_alg, minor, short_header)
2584 
2585 /*
2586  * For cipher suites with an explicit IV, run a receive test where the
2587  * explicit IV has been corrupted.  Also run a receive test that sends
2588  * a short record without a complete IV.
2589  */
2590 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_EXPLICIT_IV_TESTS);
2591 AES_GCM_12_TESTS(GEN_RECEIVE_EXPLICIT_IV_TESTS);
2592 
2593 #define GEN_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size,	\
2594 	    auth_alg, minor, len)					\
2595 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_type);		\
2596 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_type, tc)			\
2597 {									\
2598 	struct tls_enable en;						\
2599 	uint64_t seqno;							\
2600 									\
2601 	ATF_REQUIRE_KTLS();						\
2602 	seqno = random();						\
2603 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2604 	    seqno, &en);						\
2605 	test_ktls_receive_bad_type(tc, &en, seqno, len);		\
2606 	free_tls_enable(&en);						\
2607 }
2608 
2609 #define ADD_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size,	\
2610 	    auth_alg, minor)						\
2611 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_type);
2612 
2613 #define GEN_RECEIVE_TLS13_TESTS(cipher_name, cipher_alg, key_size,	\
2614 	    auth_alg, minor)						\
2615 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2616 	    auth_alg, minor, short_padded, 64, 16)			\
2617 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2618 	    auth_alg, minor, long_padded, 64 * 1024, 15)		\
2619 	GEN_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size,	\
2620 	    auth_alg, minor, 64)
2621 
2622 #define ADD_RECEIVE_TLS13_TESTS(cipher_name, cipher_alg, key_size,	\
2623 	    auth_alg, minor)						\
2624 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2625 	    auth_alg, minor, short_padded)				\
2626 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2627 	    auth_alg, minor, long_padded)				\
2628 	ADD_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size,	\
2629 	    auth_alg, minor)
2630 
2631 /*
2632  * For TLS 1.3 cipher suites, run two additional receive tests which
2633  * add padding to each record.  Also run a test that uses an
2634  * invalid "outer" record type.
2635  */
2636 TLS_13_TESTS(GEN_RECEIVE_TLS13_TESTS);
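/*
 * A sketch of the TLS 1.3 record layout these tests rely on: the real
 * record type is carried as the last byte of the plaintext, optionally
 * followed by zero padding, while the outer header always claims
 * application data:
 *
 *	ciphertext = AEAD(plaintext || content_type || 0x00 * padding)
 *
 * The bad_type test above therefore corrupts the outer header type,
 * which the kernel is expected to reject with EINVAL.
 */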
2637 
2638 static void
2639 test_ktls_invalid_receive_cipher_suite(const atf_tc_t *tc,
2640     struct tls_enable *en)
2641 {
2642 	int sockets[2];
2643 
2644 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
2645 
2646 	ATF_REQUIRE_ERRNO(EINVAL, setsockopt(sockets[1], IPPROTO_TCP,
2647 	    TCP_RXTLS_ENABLE, en, sizeof(*en)) == -1);
2648 
2649 	close_sockets(sockets);
2650 }
2651 
2652 #define GEN_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg,	\
2653 	    minor)							\
2654 ATF_TC_WITHOUT_HEAD(ktls_receive_invalid_##name);			\
2655 ATF_TC_BODY(ktls_receive_invalid_##name, tc)				\
2656 {									\
2657 	struct tls_enable en;						\
2658 	uint64_t seqno;							\
2659 									\
2660 	ATF_REQUIRE_KTLS();						\
2661 	seqno = random();						\
2662 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2663 	    seqno, &en);						\
2664 	test_ktls_invalid_receive_cipher_suite(tc, &en);		\
2665 	free_tls_enable(&en);						\
2666 }
2667 
2668 #define ADD_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg,	\
2669 	    minor)							\
2670 	ATF_TP_ADD_TC(tp, ktls_receive_invalid_##name);
2671 
2672 /*
2673  * Ensure that invalid cipher suites are rejected for receive.
2674  */
2675 INVALID_CIPHER_SUITES(GEN_INVALID_RECEIVE_TEST);
2676 
2677 static void
2678 test_ktls_unsupported_receive_cipher_suite(const atf_tc_t *tc,
2679     struct tls_enable *en)
2680 {
2681 	int sockets[2];
2682 
2683 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
2684 
2685 	ATF_REQUIRE_ERRNO(EPROTONOSUPPORT, setsockopt(sockets[1], IPPROTO_TCP,
2686 	    TCP_RXTLS_ENABLE, en, sizeof(*en)) == -1);
2687 
2688 	close_sockets(sockets);
2689 }
2690 
2691 #define GEN_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size,	\
2692 	    auth_alg, minor)						\
2693 ATF_TC_WITHOUT_HEAD(ktls_receive_unsupported_##name);			\
2694 ATF_TC_BODY(ktls_receive_unsupported_##name, tc)			\
2695 {									\
2696 	struct tls_enable en;						\
2697 	uint64_t seqno;							\
2698 									\
2699 	ATF_REQUIRE_KTLS();						\
2700 	seqno = random();						\
2701 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2702 	    seqno, &en);						\
2703 	test_ktls_unsupported_receive_cipher_suite(tc, &en);		\
2704 	free_tls_enable(&en);						\
2705 }
2706 
2707 #define ADD_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size,	\
2708 	    auth_alg, minor)						\
2709 	ATF_TP_ADD_TC(tp, ktls_receive_unsupported_##name);
2710 
2711 /*
2712  * Ensure that valid cipher suites not supported for receive are
2713  * rejected.
2714  */
2715 TLS_10_TESTS(GEN_UNSUPPORTED_RECEIVE_TEST);
2716 
2717 /*
2718  * Try to perform an invalid sendto(2) on a TXTLS-enabled socket, to exercise
2719  * KTLS error handling in the socket layer.
2720  */
2721 ATF_TC_WITHOUT_HEAD(ktls_sendto_baddst);
2722 ATF_TC_BODY(ktls_sendto_baddst, tc)
2723 {
2724 	char buf[32];
2725 	struct sockaddr_in dst;
2726 	struct tls_enable en;
2727 	ssize_t n;
2728 	int s;
2729 
2730 	ATF_REQUIRE_KTLS();
2731 
2732 	s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
2733 	ATF_REQUIRE(s >= 0);
2734 
2735 	build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,
2736 	    TLS_MINOR_VER_THREE, (uint64_t)random(), &en);
2737 
2738 	ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en,
2739 	    sizeof(en)) == 0);
2740 
2741 	memset(&dst, 0, sizeof(dst));
2742 	dst.sin_family = AF_INET;
2743 	dst.sin_len = sizeof(dst);
2744 	dst.sin_addr.s_addr = htonl(INADDR_BROADCAST);
2745 	dst.sin_port = htons(12345);
2746 
2747 	memset(buf, 0, sizeof(buf));
2748 	n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&dst,
2749 	    sizeof(dst));
2750 
2751 	/* Can't transmit to the broadcast address over TCP. */
2752 	ATF_REQUIRE_ERRNO(EACCES, n == -1);
2753 	ATF_REQUIRE(close(s) == 0);
2754 }
2755 
2756 ATF_TP_ADD_TCS(tp)
2757 {
2758 	/* Transmit tests */
2759 	AES_CBC_TESTS(ADD_TRANSMIT_TESTS);
2760 	AES_GCM_TESTS(ADD_TRANSMIT_TESTS);
2761 	CHACHA20_TESTS(ADD_TRANSMIT_TESTS);
2762 	AES_CBC_TESTS(ADD_TRANSMIT_PADDING_TESTS);
2763 	AES_CBC_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
2764 	AES_GCM_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
2765 	CHACHA20_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
2766 	INVALID_CIPHER_SUITES(ADD_INVALID_TRANSMIT_TEST);
2767 
2768 	/* Receive tests */
2769 	TLS_10_TESTS(ADD_UNSUPPORTED_RECEIVE_TEST);
2770 	AES_CBC_NONZERO_TESTS(ADD_RECEIVE_TESTS);
2771 	AES_GCM_TESTS(ADD_RECEIVE_TESTS);
2772 	CHACHA20_TESTS(ADD_RECEIVE_TESTS);
2773 	AES_CBC_NONZERO_TESTS(ADD_RECEIVE_MTE_TESTS);
2774 	AES_CBC_NONZERO_TESTS(ADD_RECEIVE_EXPLICIT_IV_TESTS);
2775 	AES_GCM_12_TESTS(ADD_RECEIVE_EXPLICIT_IV_TESTS);
2776 	TLS_13_TESTS(ADD_RECEIVE_TLS13_TESTS);
2777 	INVALID_CIPHER_SUITES(ADD_INVALID_RECEIVE_TEST);
2778 
2779 	/* Miscellaneous */
2780 	ATF_TP_ADD_TC(tp, ktls_sendto_baddst);
2781 
2782 	return (atf_no_error());
2783 }
2784