xref: /freebsd/tests/sys/kern/ktls_test.c (revision 70bb22868d534435fd8d84173f264b48acacd501)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Netflix Inc.
5  * Written by: John Baldwin <jhb@FreeBSD.org>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/param.h>
30 #include <sys/endian.h>
31 #include <sys/event.h>
32 #include <sys/ktls.h>
33 #include <sys/socket.h>
34 #include <sys/sysctl.h>
35 #include <netinet/in.h>
36 #include <netinet/tcp.h>
37 #include <crypto/cryptodev.h>
38 #include <assert.h>
39 #include <err.h>
#include <errno.h>
40 #include <fcntl.h>
41 #include <libutil.h>
42 #include <netdb.h>
43 #include <poll.h>
#include <stdarg.h>
44 #include <stdbool.h>
#include <stdio.h>
45 #include <stdlib.h>
#include <string.h>
#include <unistd.h>
46 #include <atf-c.h>
47 
48 #include <openssl/err.h>
49 #include <openssl/evp.h>
50 #include <openssl/hmac.h>
51 
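/*
 * Skip the test if the kernel does not support TLS offload or if it is
 * administratively disabled via the kern.ipc.tls.enable sysctl.
 */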
52 static void
53 require_ktls(void)
54 {
55 	size_t len;
56 	bool enable;
57 
58 	len = sizeof(enable);
59 	if (sysctlbyname("kern.ipc.tls.enable", &enable, &len, NULL, 0) == -1) {
60 		if (errno == ENOENT)
61 			atf_tc_skip("kernel does not support TLS offload");
62 		atf_libc_error(errno, "Failed to read kern.ipc.tls.enable");
63 	}
64 
65 	if (!enable)
66 		atf_tc_skip("Kernel TLS is disabled");
67 }
68 
69 #define	ATF_REQUIRE_KTLS()	require_ktls()
70 
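/*
 * If the ktls.require_ifnet or ktls.require_toe config variables are
 * set, skip the test unless the connection is using that TLS mode.
 */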
71 static void
72 check_tls_mode(const atf_tc_t *tc, int s, int sockopt)
73 {
74 	if (atf_tc_get_config_var_as_bool_wd(tc, "ktls.require_ifnet", false)) {
75 		socklen_t len;
76 		int mode;
77 
78 		len = sizeof(mode);
79 		if (getsockopt(s, IPPROTO_TCP, sockopt, &mode, &len) == -1)
80 			atf_libc_error(errno, "Failed to fetch TLS mode");
81 
82 		if (mode != TCP_TLS_MODE_IFNET)
83 			atf_tc_skip("connection did not use ifnet TLS");
84 	}
85 
86 	if (atf_tc_get_config_var_as_bool_wd(tc, "ktls.require_toe", false)) {
87 		socklen_t len;
88 		int mode;
89 
90 		len = sizeof(mode);
91 		if (getsockopt(s, IPPROTO_TCP, sockopt, &mode, &len) == -1)
92 			atf_libc_error(errno, "Failed to fetch TLS mode");
93 
94 		if (mode != TCP_TLS_MODE_TOE)
95 			atf_tc_skip("connection did not use TOE TLS");
96 	}
97 }
98 
99 static void __printflike(2, 3)
100 debug(const atf_tc_t *tc, const char *fmt, ...)
101 {
102 	if (!atf_tc_get_config_var_as_bool_wd(tc, "ktls.debug", false))
103 		return;
104 
105 	va_list ap;
106 	va_start(ap, fmt);
107 	vprintf(fmt, ap);
108 	va_end(ap);
109 }
110 
111 static void
112 debug_hexdump(const atf_tc_t *tc, const void *buf, int length,
113     const char *label)
114 {
115 	if (!atf_tc_get_config_var_as_bool_wd(tc, "ktls.debug", false))
116 		return;
117 
118 	if (label != NULL)
119 		printf("%s:\n", label);
120 	hexdump(buf, length, NULL, 0);
121 }
122 
123 static char
124 rdigit(void)
125 {
126 	/* ASCII printable values between 0x20 and 0x7e */
127 	return (0x20 + random() % (0x7f - 0x20));
128 }
129 
130 static char *
131 alloc_buffer(size_t len)
132 {
133 	char *buf;
134 	size_t i;
135 
136 	if (len == 0)
137 		return (NULL);
138 	buf = malloc(len);
139 	for (i = 0; i < len; i++)
140 		buf[i] = rdigit();
141 	return (buf);
142 }
143 
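/*
 * Create a pair of connected TCP sockets over the loopback interface.
 * Both returned sockets are non-blocking.
 */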
144 static bool
145 socketpair_tcp(int sv[2])
146 {
147 	struct pollfd pfd;
148 	struct sockaddr_in sin;
149 	socklen_t len;
150 	int as, cs, ls;
151 
152 	ls = socket(PF_INET, SOCK_STREAM, 0);
153 	if (ls == -1) {
154 		warn("socket() for listen");
155 		return (false);
156 	}
157 
158 	memset(&sin, 0, sizeof(sin));
159 	sin.sin_len = sizeof(sin);
160 	sin.sin_family = AF_INET;
161 	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
162 	if (bind(ls, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
163 		warn("bind");
164 		close(ls);
165 		return (false);
166 	}
167 
168 	if (listen(ls, 1) == -1) {
169 		warn("listen");
170 		close(ls);
171 		return (false);
172 	}
173 
174 	len = sizeof(sin);
175 	if (getsockname(ls, (struct sockaddr *)&sin, &len) == -1) {
176 		warn("getsockname");
177 		close(ls);
178 		return (false);
179 	}
180 
181 	cs = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
182 	if (cs == -1) {
183 		warn("socket() for connect");
184 		close(ls);
185 		return (false);
186 	}
187 
188 	if (connect(cs, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
189 		if (errno != EINPROGRESS) {
190 			warn("connect");
191 			close(ls);
192 			close(cs);
193 			return (false);
194 		}
195 	}
196 
197 	as = accept4(ls, NULL, NULL, SOCK_NONBLOCK);
198 	if (as == -1) {
199 		warn("accept4");
200 		close(ls);
201 		close(cs);
202 		return (false);
203 	}
204 
205 	close(ls);
206 
207 	pfd.fd = cs;
208 	pfd.events = POLLOUT;
209 	pfd.revents = 0;
210 	ATF_REQUIRE(poll(&pfd, 1, INFTIM) == 1);
211 	ATF_REQUIRE(pfd.revents == POLLOUT);
212 
213 	sv[0] = cs;
214 	sv[1] = as;
215 	return (true);
216 }
217 
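/*
 * Connect to the echo server named by the ktls.host (and optional
 * ktls.port) config variables.  The same descriptor is returned in
 * both slots of 'sv'.
 */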
218 static bool
219 echo_socket(const atf_tc_t *tc, int sv[2])
220 {
221 	const char *cause, *host, *port;
222 	struct addrinfo hints, *ai, *tofree;
223 	int error, flags, s;
224 
225 	host = atf_tc_get_config_var(tc, "ktls.host");
226 	port = atf_tc_get_config_var_wd(tc, "ktls.port", "echo");
227 	memset(&hints, 0, sizeof(hints));
228 	hints.ai_family = AF_UNSPEC;
229 	hints.ai_socktype = SOCK_STREAM;
230 	hints.ai_protocol = IPPROTO_TCP;
231 	error = getaddrinfo(host, port, &hints, &tofree);
232 	if (error != 0) {
233 		warnx("getaddrinfo(%s:%s) failed: %s", host, port,
234 		    gai_strerror(error));
235 		return (false);
236 	}
237 
238 	cause = NULL;
239 	for (ai = tofree; ai != NULL; ai = ai->ai_next) {
240 		s = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
241 		if (s == -1) {
242 			cause = "socket";
243 			error = errno;
244 			continue;
245 		}
246 
247 		if (connect(s, ai->ai_addr, ai->ai_addrlen) == -1) {
248 			cause = "connect";
249 			error = errno;
250 			close(s);
251 			continue;
252 		}
253 
254 		freeaddrinfo(tofree);
255 
256 		ATF_REQUIRE((flags = fcntl(s, F_GETFL)) != -1);
257 		flags |= O_NONBLOCK;
258 		ATF_REQUIRE(fcntl(s, F_SETFL, flags) != -1);
259 
260 		sv[0] = s;
261 		sv[1] = s;
262 		return (true);
263 	}
264 
265 	warnc(error, "%s", cause);
266 	freeaddrinfo(tofree);
267 	return (false);
268 }
269 
270 static bool
271 open_sockets(const atf_tc_t *tc, int sv[2])
272 {
273 	if (atf_tc_has_config_var(tc, "ktls.host"))
274 		return (echo_socket(tc, sv));
275 	else
276 		return (socketpair_tcp(sv));
277 }
278 
279 static void
280 close_sockets(int sv[2])
281 {
282 	if (sv[0] != sv[1])
283 		ATF_REQUIRE(close(sv[1]) == 0);
284 	ATF_REQUIRE(close(sv[0]) == 0);
285 }
286 
287 static void
288 fd_set_blocking(int fd)
289 {
290 	int flags;
291 
292 	ATF_REQUIRE((flags = fcntl(fd, F_GETFL)) != -1);
293 	flags &= ~O_NONBLOCK;
294 	ATF_REQUIRE(fcntl(fd, F_SETFL, flags) != -1);
295 }
296 
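/*
 * Run a CBC cipher over 'input' storing the result in 'output'; 'enc'
 * selects encryption (1) or decryption (0).  Padding is disabled, so
 * 'size' must be a multiple of the cipher block size.
 */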
297 static bool
298 cbc_crypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
299     const char *input, char *output, size_t size, int enc)
300 {
301 	EVP_CIPHER_CTX *ctx;
302 	int outl, total;
303 
304 	ctx = EVP_CIPHER_CTX_new();
305 	if (ctx == NULL) {
306 		warnx("EVP_CIPHER_CTX_new failed: %s",
307 		    ERR_error_string(ERR_get_error(), NULL));
308 		return (false);
309 	}
310 	if (EVP_CipherInit_ex(ctx, cipher, NULL, (const u_char *)key,
311 	    (const u_char *)iv, enc) != 1) {
312 		warnx("EVP_CipherInit_ex failed: %s",
313 		    ERR_error_string(ERR_get_error(), NULL));
314 		EVP_CIPHER_CTX_free(ctx);
315 		return (false);
316 	}
317 	EVP_CIPHER_CTX_set_padding(ctx, 0);
318 	if (EVP_CipherUpdate(ctx, (u_char *)output, &outl,
319 	    (const u_char *)input, size) != 1) {
320 		warnx("EVP_CipherUpdate failed: %s",
321 		    ERR_error_string(ERR_get_error(), NULL));
322 		EVP_CIPHER_CTX_free(ctx);
323 		return (false);
324 	}
325 	total = outl;
326 	if (EVP_CipherFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
327 		warnx("EVP_CipherFinal_ex failed: %s",
328 		    ERR_error_string(ERR_get_error(), NULL));
329 		EVP_CIPHER_CTX_free(ctx);
330 		return (false);
331 	}
332 	total += outl;
333 	if ((size_t)total != size) {
334 		warnx("cipher size mismatch: %zu vs %d", size, total);
335 		EVP_CIPHER_CTX_free(ctx);
336 		return (false);
337 	}
338 	EVP_CIPHER_CTX_free(ctx);
339 	return (true);
340 }
341 
342 static bool
343 cbc_encrypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
344     const char *input, char *output, size_t size)
345 {
346 	return (cbc_crypt(cipher, key, iv, input, output, size, 1));
347 }
348 
349 static bool
350 cbc_decrypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
351     const char *input, char *output, size_t size)
352 {
353 	return (cbc_crypt(cipher, key, iv, input, output, size, 0));
354 }
355 
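/* Compute an HMAC over the concatenation of the AAD and the payload. */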
356 static bool
357 compute_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad,
358     size_t aad_len, const void *buffer, size_t len, void *digest,
359     u_int *digest_len)
360 {
361 	HMAC_CTX *ctx;
362 
363 	ctx = HMAC_CTX_new();
364 	if (ctx == NULL) {
365 		warnx("HMAC_CTX_new failed: %s",
366 		    ERR_error_string(ERR_get_error(), NULL));
367 		return (false);
368 	}
369 	if (HMAC_Init_ex(ctx, key, key_len, md, NULL) != 1) {
370 		warnx("HMAC_Init_ex failed: %s",
371 		    ERR_error_string(ERR_get_error(), NULL));
372 		HMAC_CTX_free(ctx);
373 		return (false);
374 	}
375 	if (HMAC_Update(ctx, aad, aad_len) != 1) {
376 		warnx("HMAC_Update (aad) failed: %s",
377 		    ERR_error_string(ERR_get_error(), NULL));
378 		HMAC_CTX_free(ctx);
379 		return (false);
380 	}
381 	if (HMAC_Update(ctx, buffer, len) != 1) {
382 		warnx("HMAC_Update (payload) failed: %s",
383 		    ERR_error_string(ERR_get_error(), NULL));
384 		HMAC_CTX_free(ctx);
385 		return (false);
386 	}
387 	if (HMAC_Final(ctx, digest, digest_len) != 1) {
388 		warnx("HMAC_Final failed: %s",
389 		    ERR_error_string(ERR_get_error(), NULL));
390 		HMAC_CTX_free(ctx);
391 		return (false);
392 	}
393 	HMAC_CTX_free(ctx);
394 	return (true);
395 }
396 
397 static bool
398 verify_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad,
399     size_t aad_len, const void *buffer, size_t len, const void *digest)
400 {
401 	unsigned char digest2[EVP_MAX_MD_SIZE];
402 	u_int digest_len;
403 
404 	if (!compute_hash(md, key, key_len, aad, aad_len, buffer, len, digest2,
405 	    &digest_len))
406 		return (false);
407 	if (memcmp(digest, digest2, digest_len) != 0) {
408 		warnx("HMAC mismatch");
409 		return (false);
410 	}
411 	return (true);
412 }
413 
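/*
 * Seal a buffer with the given AEAD cipher, key, and nonce, writing
 * the ciphertext to 'output' and the authentication tag to 'tag'.
 */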
414 static bool
415 aead_encrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
416     const void *aad, size_t aad_len, const char *input, char *output,
417     size_t size, char *tag, size_t tag_len)
418 {
419 	EVP_CIPHER_CTX *ctx;
420 	int outl, total;
421 
422 	ctx = EVP_CIPHER_CTX_new();
423 	if (ctx == NULL) {
424 		warnx("EVP_CIPHER_CTX_new failed: %s",
425 		    ERR_error_string(ERR_get_error(), NULL));
426 		return (false);
427 	}
428 	if (EVP_EncryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
429 	    (const u_char *)nonce) != 1) {
430 		warnx("EVP_EncryptInit_ex failed: %s",
431 		    ERR_error_string(ERR_get_error(), NULL));
432 		EVP_CIPHER_CTX_free(ctx);
433 		return (false);
434 	}
435 	EVP_CIPHER_CTX_set_padding(ctx, 0);
436 	if (aad != NULL) {
437 		if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
438 		    aad_len) != 1) {
439 			warnx("EVP_EncryptUpdate for AAD failed: %s",
440 			    ERR_error_string(ERR_get_error(), NULL));
441 			EVP_CIPHER_CTX_free(ctx);
442 			return (false);
443 		}
444 	}
445 	if (EVP_EncryptUpdate(ctx, (u_char *)output, &outl,
446 	    (const u_char *)input, size) != 1) {
447 		warnx("EVP_EncryptUpdate failed: %s",
448 		    ERR_error_string(ERR_get_error(), NULL));
449 		EVP_CIPHER_CTX_free(ctx);
450 		return (false);
451 	}
452 	total = outl;
453 	if (EVP_EncryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
454 		warnx("EVP_EncryptFinal_ex failed: %s",
455 		    ERR_error_string(ERR_get_error(), NULL));
456 		EVP_CIPHER_CTX_free(ctx);
457 		return (false);
458 	}
459 	total += outl;
460 	if ((size_t)total != size) {
461 		warnx("encrypt size mismatch: %zu vs %d", size, total);
462 		EVP_CIPHER_CTX_free(ctx);
463 		return (false);
464 	}
465 	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tag_len, tag) !=
466 	    1) {
467 		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed: %s",
468 		    ERR_error_string(ERR_get_error(), NULL));
469 		EVP_CIPHER_CTX_free(ctx);
470 		return (false);
471 	}
472 	EVP_CIPHER_CTX_free(ctx);
473 	return (true);
474 }
475 
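/*
 * Open an AEAD-sealed buffer, verifying the supplied tag.  Returns
 * false if the tag does not match.
 */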
476 static bool
477 aead_decrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
478     const void *aad, size_t aad_len, const char *input, char *output,
479     size_t size, const char *tag, size_t tag_len)
480 {
481 	EVP_CIPHER_CTX *ctx;
482 	int outl, total;
483 	bool valid;
484 
485 	ctx = EVP_CIPHER_CTX_new();
486 	if (ctx == NULL) {
487 		warnx("EVP_CIPHER_CTX_new failed: %s",
488 		    ERR_error_string(ERR_get_error(), NULL));
489 		return (false);
490 	}
491 	if (EVP_DecryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
492 	    (const u_char *)nonce) != 1) {
493 		warnx("EVP_DecryptInit_ex failed: %s",
494 		    ERR_error_string(ERR_get_error(), NULL));
495 		EVP_CIPHER_CTX_free(ctx);
496 		return (false);
497 	}
498 	EVP_CIPHER_CTX_set_padding(ctx, 0);
499 	if (aad != NULL) {
500 		if (EVP_DecryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
501 		    aad_len) != 1) {
502 			warnx("EVP_DecryptUpdate for AAD failed: %s",
503 			    ERR_error_string(ERR_get_error(), NULL));
504 			EVP_CIPHER_CTX_free(ctx);
505 			return (false);
506 		}
507 	}
508 	if (EVP_DecryptUpdate(ctx, (u_char *)output, &outl,
509 	    (const u_char *)input, size) != 1) {
510 		warnx("EVP_DecryptUpdate failed: %s",
511 		    ERR_error_string(ERR_get_error(), NULL));
512 		EVP_CIPHER_CTX_free(ctx);
513 		return (false);
514 	}
515 	total = outl;
516 	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tag_len,
517 	    __DECONST(char *, tag)) != 1) {
518 		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed: %s",
519 		    ERR_error_string(ERR_get_error(), NULL));
520 		EVP_CIPHER_CTX_free(ctx);
521 		return (false);
522 	}
523 	valid = (EVP_DecryptFinal_ex(ctx, (u_char *)output + outl, &outl) == 1);
524 	total += outl;
525 	if ((size_t)total != size) {
526 		warnx("decrypt size mismatch: %zu vs %d", size, total);
527 		EVP_CIPHER_CTX_free(ctx);
528 		return (false);
529 	}
530 	if (!valid)
531 		warnx("tag mismatch");
532 	EVP_CIPHER_CTX_free(ctx);
533 	return (valid);
534 }
535 
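/*
 * Fill in a struct tls_enable for the requested cipher suite, auth
 * algorithm, and TLS minor version: random keys and IVs of the
 * appropriate lengths are generated and the initial record sequence
 * number is recorded.
 */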
536 static void
537 build_tls_enable(const atf_tc_t *tc, int cipher_alg, size_t cipher_key_len,
538     int auth_alg, int minor, uint64_t seqno, struct tls_enable *en)
539 {
540 	u_int auth_key_len, iv_len;
541 
542 	memset(en, 0, sizeof(*en));
543 
544 	switch (cipher_alg) {
545 	case CRYPTO_AES_CBC:
546 		if (minor == TLS_MINOR_VER_ZERO)
547 			iv_len = AES_BLOCK_LEN;
548 		else
549 			iv_len = 0;
550 		break;
551 	case CRYPTO_AES_NIST_GCM_16:
552 		if (minor == TLS_MINOR_VER_TWO)
553 			iv_len = TLS_AEAD_GCM_LEN;
554 		else
555 			iv_len = TLS_1_3_GCM_IV_LEN;
556 		break;
557 	case CRYPTO_CHACHA20_POLY1305:
558 		iv_len = TLS_CHACHA20_IV_LEN;
559 		break;
560 	default:
561 		iv_len = 0;
562 		break;
563 	}
564 	switch (auth_alg) {
565 	case CRYPTO_SHA1_HMAC:
566 		auth_key_len = SHA1_HASH_LEN;
567 		break;
568 	case CRYPTO_SHA2_256_HMAC:
569 		auth_key_len = SHA2_256_HASH_LEN;
570 		break;
571 	case CRYPTO_SHA2_384_HMAC:
572 		auth_key_len = SHA2_384_HASH_LEN;
573 		break;
574 	default:
575 		auth_key_len = 0;
576 		break;
577 	}
578 	en->cipher_key = alloc_buffer(cipher_key_len);
579 	debug_hexdump(tc, en->cipher_key, cipher_key_len, "cipher key");
580 	en->iv = alloc_buffer(iv_len);
581 	if (iv_len != 0)
582 		debug_hexdump(tc, en->iv, iv_len, "iv");
583 	en->auth_key = alloc_buffer(auth_key_len);
584 	if (auth_key_len != 0)
585 		debug_hexdump(tc, en->auth_key, auth_key_len, "auth key");
586 	en->cipher_algorithm = cipher_alg;
587 	en->cipher_key_len = cipher_key_len;
588 	en->iv_len = iv_len;
589 	en->auth_algorithm = auth_alg;
590 	en->auth_key_len = auth_key_len;
591 	en->tls_vmajor = TLS_MAJOR_VER_ONE;
592 	en->tls_vminor = minor;
593 	be64enc(en->rec_seq, seqno);
594 	debug(tc, "seqno: %ju\n", (uintmax_t)seqno);
595 }
596 
597 static void
598 free_tls_enable(struct tls_enable *en)
599 {
600 	free(__DECONST(void *, en->cipher_key));
601 	free(__DECONST(void *, en->iv));
602 	free(__DECONST(void *, en->auth_key));
603 }
604 
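/* Map a kernel TLS cipher suite to the matching OpenSSL EVP cipher. */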
605 static const EVP_CIPHER *
606 tls_EVP_CIPHER(const struct tls_enable *en)
607 {
608 	switch (en->cipher_algorithm) {
609 	case CRYPTO_AES_CBC:
610 		switch (en->cipher_key_len) {
611 		case 128 / 8:
612 			return (EVP_aes_128_cbc());
613 		case 256 / 8:
614 			return (EVP_aes_256_cbc());
615 		default:
616 			return (NULL);
617 		}
618 		break;
619 	case CRYPTO_AES_NIST_GCM_16:
620 		switch (en->cipher_key_len) {
621 		case 128 / 8:
622 			return (EVP_aes_128_gcm());
623 		case 256 / 8:
624 			return (EVP_aes_256_gcm());
625 		default:
626 			return (NULL);
627 		}
628 		break;
629 	case CRYPTO_CHACHA20_POLY1305:
630 		return (EVP_chacha20_poly1305());
631 	default:
632 		return (NULL);
633 	}
634 }
635 
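/* Map the kernel TLS auth algorithm to the matching OpenSSL digest. */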
636 static const EVP_MD *
637 tls_EVP_MD(const struct tls_enable *en)
638 {
639 	switch (en->auth_algorithm) {
640 	case CRYPTO_SHA1_HMAC:
641 		return (EVP_sha1());
642 	case CRYPTO_SHA2_256_HMAC:
643 		return (EVP_sha256());
644 	case CRYPTO_SHA2_384_HMAC:
645 		return (EVP_sha384());
646 	default:
647 		return (NULL);
648 	}
649 }
650 
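/*
 * Size of the TLS record header, including any explicit IV or nonce
 * that immediately follows it for this cipher suite and version.
 */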
651 static size_t
652 tls_header_len(struct tls_enable *en)
653 {
654 	size_t len;
655 
656 	len = sizeof(struct tls_record_layer);
657 	switch (en->cipher_algorithm) {
658 	case CRYPTO_AES_CBC:
659 		if (en->tls_vminor != TLS_MINOR_VER_ZERO)
660 			len += AES_BLOCK_LEN;
661 		return (len);
662 	case CRYPTO_AES_NIST_GCM_16:
663 		if (en->tls_vminor == TLS_MINOR_VER_TWO)
664 			len += sizeof(uint64_t);
665 		return (len);
666 	case CRYPTO_CHACHA20_POLY1305:
667 		return (len);
668 	default:
669 		return (0);
670 	}
671 }
672 
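/* Size of the MAC or AEAD tag appended to each record. */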
673 static size_t
674 tls_mac_len(struct tls_enable *en)
675 {
676 	switch (en->cipher_algorithm) {
677 	case CRYPTO_AES_CBC:
678 		switch (en->auth_algorithm) {
679 		case CRYPTO_SHA1_HMAC:
680 			return (SHA1_HASH_LEN);
681 		case CRYPTO_SHA2_256_HMAC:
682 			return (SHA2_256_HASH_LEN);
683 		case CRYPTO_SHA2_384_HMAC:
684 			return (SHA2_384_HASH_LEN);
685 		default:
686 			return (0);
687 		}
688 	case CRYPTO_AES_NIST_GCM_16:
689 		return (AES_GMAC_HASH_LEN);
690 	case CRYPTO_CHACHA20_POLY1305:
691 		return (POLY1305_HASH_LEN);
692 	default:
693 		return (0);
694 	}
695 }
696 
697 /* Includes maximum padding for MTE. */
698 static size_t
699 tls_trailer_len(struct tls_enable *en)
700 {
701 	size_t len;
702 
703 	len = tls_mac_len(en);
704 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
705 		len += AES_BLOCK_LEN;
706 	if (en->tls_vminor == TLS_MINOR_VER_THREE)
707 		len++;
708 	return (len);
709 }
710 
711 /* Minimum valid record payload size for a given cipher suite. */
712 static size_t
713 tls_minimum_record_payload(struct tls_enable *en)
714 {
715 	size_t len;
716 
717 	len = tls_header_len(en);
718 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
719 		len += roundup2(tls_mac_len(en) + 1, AES_BLOCK_LEN);
720 	else
721 		len += tls_mac_len(en);
722 	if (en->tls_vminor == TLS_MINOR_VER_THREE)
723 		len++;
724 	return (len - sizeof(struct tls_record_layer));
725 }
726 
727 /* 'len' is the length of the payload application data. */
728 static void
729 tls_mte_aad(struct tls_enable *en, size_t len,
730     const struct tls_record_layer *hdr, uint64_t seqno, struct tls_mac_data *ad)
731 {
732 	ad->seq = htobe64(seqno);
733 	ad->type = hdr->tls_type;
734 	ad->tls_vmajor = hdr->tls_vmajor;
735 	ad->tls_vminor = hdr->tls_vminor;
736 	ad->tls_length = htons(len);
737 }
738 
739 static void
740 tls_12_aead_aad(struct tls_enable *en, size_t len,
741     const struct tls_record_layer *hdr, uint64_t seqno,
742     struct tls_aead_data *ad)
743 {
744 	ad->seq = htobe64(seqno);
745 	ad->type = hdr->tls_type;
746 	ad->tls_vmajor = hdr->tls_vmajor;
747 	ad->tls_vminor = hdr->tls_vminor;
748 	ad->tls_length = htons(len);
749 }
750 
751 static void
752 tls_13_aad(struct tls_enable *en, const struct tls_record_layer *hdr,
753     uint64_t seqno, struct tls_aead_data_13 *ad)
754 {
755 	ad->type = hdr->tls_type;
756 	ad->tls_vmajor = hdr->tls_vmajor;
757 	ad->tls_vminor = hdr->tls_vminor;
758 	ad->tls_length = hdr->tls_length;
759 }
760 
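/*
 * TLS 1.2 GCM nonce: the 4-byte implicit IV followed by the 8-byte
 * explicit nonce carried after the record header.
 */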
761 static void
762 tls_12_gcm_nonce(struct tls_enable *en, const struct tls_record_layer *hdr,
763     char *nonce)
764 {
765 	memcpy(nonce, en->iv, TLS_AEAD_GCM_LEN);
766 	memcpy(nonce + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
767 }
768 
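/*
 * TLS 1.3 (and ChaCha20+Poly1305) nonce: the 12-byte IV with the
 * record sequence number XORed into the final 8 bytes.
 */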
769 static void
770 tls_13_nonce(struct tls_enable *en, uint64_t seqno, char *nonce)
771 {
772 	static_assert(TLS_1_3_GCM_IV_LEN == TLS_CHACHA20_IV_LEN,
773 	    "TLS 1.3 nonce length mismatch");
774 	memcpy(nonce, en->iv, TLS_1_3_GCM_IV_LEN);
775 	*(uint64_t *)(nonce + 4) ^= htobe64(seqno);
776 }
777 
778 /*
779  * Decrypt a TLS record 'len' bytes long at 'src' and store the result at
780  * 'dst'.  If the TLS record header is inconsistent with the session
781  * parameters or 'dst' lacks sufficient room ('avail'), fail the test.
782  */
783 static size_t
784 decrypt_tls_aes_cbc_mte(const atf_tc_t *tc, struct tls_enable *en,
785     uint64_t seqno, const void *src, size_t len, void *dst, size_t avail,
786     uint8_t *record_type)
787 {
788 	const struct tls_record_layer *hdr;
789 	struct tls_mac_data aad;
790 	const char *iv;
791 	char *buf;
792 	size_t hdr_len, mac_len, payload_len;
793 	int padding;
794 
795 	hdr = src;
796 	hdr_len = tls_header_len(en);
797 	mac_len = tls_mac_len(en);
798 	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
799 	ATF_REQUIRE(hdr->tls_vminor == en->tls_vminor);
800 	debug(tc, "decrypting MTE record seqno %ju:\n", (uintmax_t)seqno);
801 	debug_hexdump(tc, src, len, NULL);
802 
803 	/* First, decrypt the outer payload into a temporary buffer. */
804 	payload_len = len - hdr_len;
805 	buf = malloc(payload_len);
806 	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
807 		iv = en->iv;
808 	else
809 		iv = (void *)(hdr + 1);
810 	debug_hexdump(tc, iv, AES_BLOCK_LEN, "iv");
811 	ATF_REQUIRE(cbc_decrypt(tls_EVP_CIPHER(en), en->cipher_key, iv,
812 	    (const u_char *)src + hdr_len, buf, payload_len));
813 	debug_hexdump(tc, buf, payload_len, "decrypted buffer");
814 
815 	/*
816 	 * Copy the last encrypted block to use as the IV for the next
817 	 * record for TLS 1.0.
818 	 */
819 	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
820 		memcpy(__DECONST(uint8_t *, en->iv), (const u_char *)src +
821 		    (len - AES_BLOCK_LEN), AES_BLOCK_LEN);
822 
823 	/*
824 	 * Verify the trailing padding and strip it.
825 	 *
826 	 * The kernel always generates the smallest amount of padding.
827 	 */
828 	padding = buf[payload_len - 1] + 1;
829 	ATF_REQUIRE(padding > 0 && padding <= AES_BLOCK_LEN);
830 	ATF_REQUIRE(payload_len >= mac_len + padding);
831 	payload_len -= padding;
832 
833 	/* Verify HMAC. */
834 	payload_len -= mac_len;
835 	tls_mte_aad(en, payload_len, hdr, seqno, &aad);
836 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
837 	ATF_REQUIRE(verify_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len,
838 	    &aad, sizeof(aad), buf, payload_len, buf + payload_len));
839 
840 	ATF_REQUIRE(payload_len <= avail);
841 	memcpy(dst, buf, payload_len);
842 	*record_type = hdr->tls_type;
843 	return (payload_len);
844 }
845 
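/* Decrypt and authenticate a single TLS 1.2 AEAD record. */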
846 static size_t
847 decrypt_tls_12_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
848     const void *src, size_t len, void *dst, uint8_t *record_type)
849 {
850 	const struct tls_record_layer *hdr;
851 	struct tls_aead_data aad;
852 	char nonce[12];
853 	size_t hdr_len, mac_len, payload_len;
854 
855 	hdr = src;
856 
857 	hdr_len = tls_header_len(en);
858 	mac_len = tls_mac_len(en);
859 	payload_len = len - (hdr_len + mac_len);
860 	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
861 	ATF_REQUIRE(hdr->tls_vminor == TLS_MINOR_VER_TWO);
862 	debug(tc, "decrypting TLS 1.2 record seqno %ju:\n", (uintmax_t)seqno);
863 	debug_hexdump(tc, src, len, NULL);
864 
865 	tls_12_aead_aad(en, payload_len, hdr, seqno, &aad);
866 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
867 	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
868 		tls_12_gcm_nonce(en, hdr, nonce);
869 	else
870 		tls_13_nonce(en, seqno, nonce);
871 	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");
872 
873 	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
874 	    &aad, sizeof(aad), (const char *)src + hdr_len, dst, payload_len,
875 	    (const char *)src + hdr_len + payload_len, mac_len));
876 
877 	*record_type = hdr->tls_type;
878 	return (payload_len);
879 }
880 
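/*
 * Decrypt a single TLS 1.3 record and strip the record type byte
 * trailing the inner plaintext.
 */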
881 static size_t
882 decrypt_tls_13_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
883     const void *src, size_t len, void *dst, uint8_t *record_type)
884 {
885 	const struct tls_record_layer *hdr;
886 	struct tls_aead_data_13 aad;
887 	char nonce[12];
888 	char *buf;
889 	size_t hdr_len, mac_len, payload_len;
890 
891 	hdr = src;
892 
893 	hdr_len = tls_header_len(en);
894 	mac_len = tls_mac_len(en);
895 	payload_len = len - (hdr_len + mac_len);
896 	ATF_REQUIRE(payload_len >= 1);
897 	ATF_REQUIRE(hdr->tls_type == TLS_RLTYPE_APP);
898 	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
899 	ATF_REQUIRE(hdr->tls_vminor == TLS_MINOR_VER_TWO);
900 	debug(tc, "decrypting TLS 1.3 record seqno %ju:\n", (uintmax_t)seqno);
901 	debug_hexdump(tc, src, len, NULL);
902 
903 	tls_13_aad(en, hdr, seqno, &aad);
904 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
905 	tls_13_nonce(en, seqno, nonce);
906 	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");
907 
908 	/*
909 	 * Use a temporary buffer for the output since the last byte of
910 	 * the decrypted payload is the record type and must be stripped.
911 	 */
912 	buf = malloc(payload_len);
913 
914 	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
915 	    &aad, sizeof(aad), (const char *)src + hdr_len, buf, payload_len,
916 	    (const char *)src + hdr_len + payload_len, mac_len));
917 	debug_hexdump(tc, buf, payload_len, "decrypted buffer");
918 
919 	/* Trim record type. */
920 	*record_type = buf[payload_len - 1];
921 	payload_len--;
922 
923 	memcpy(dst, buf, payload_len);
924 	free(buf);
925 
926 	return (payload_len);
927 }
928 
929 static size_t
930 decrypt_tls_aead(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
931     const void *src, size_t len, void *dst, size_t avail, uint8_t *record_type)
932 {
933 	const struct tls_record_layer *hdr;
934 	size_t payload_len;
935 
936 	hdr = src;
937 	ATF_REQUIRE(ntohs(hdr->tls_length) + sizeof(*hdr) == len);
938 
939 	payload_len = len - (tls_header_len(en) + tls_trailer_len(en));
940 	ATF_REQUIRE(payload_len <= avail);
941 
942 	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
943 		ATF_REQUIRE(decrypt_tls_12_aead(tc, en, seqno, src, len, dst,
944 		    record_type) == payload_len);
945 	} else {
946 		ATF_REQUIRE(decrypt_tls_13_aead(tc, en, seqno, src, len, dst,
947 		    record_type) == payload_len);
948 	}
949 
950 	return (payload_len);
951 }
952 
953 static size_t
954 decrypt_tls_record(const atf_tc_t *tc, struct tls_enable *en, uint64_t seqno,
955     const void *src, size_t len, void *dst, size_t avail, uint8_t *record_type)
956 {
957 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
958 		return (decrypt_tls_aes_cbc_mte(tc, en, seqno, src, len, dst,
959 		    avail, record_type));
960 	else
961 		return (decrypt_tls_aead(tc, en, seqno, src, len, dst, avail,
962 		    record_type));
963 }
964 
965 /*
966  * Encrypt a TLS record of type 'record_type' whose 'len'-byte payload
967  * is at 'src' and store the result at 'dst'.  If 'dst' doesn't have
968  * sufficient room ('avail'), fail the test.  'padding' is the amount
969  * of additional padding to include beyond any amount mandated by the
970  * cipher suite.
971  */
972 static size_t
973 encrypt_tls_aes_cbc_mte(const atf_tc_t *tc, struct tls_enable *en,
974     uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
975     size_t avail, size_t padding)
976 {
977 	struct tls_record_layer *hdr;
978 	struct tls_mac_data aad;
979 	char *buf, *iv;
980 	size_t hdr_len, mac_len, record_len;
981 	u_int digest_len, i;
982 
983 	ATF_REQUIRE(padding % 16 == 0);
984 
985 	hdr = dst;
986 	buf = dst;
987 
988 	debug(tc, "encrypting MTE record seqno %ju:\n", (uintmax_t)seqno);
989 	hdr_len = tls_header_len(en);
990 	mac_len = tls_mac_len(en);
991 	padding += (AES_BLOCK_LEN - (len + mac_len) % AES_BLOCK_LEN);
992 	ATF_REQUIRE(padding > 0 && padding <= 255);
993 
994 	record_len = hdr_len + len + mac_len + padding;
995 	ATF_REQUIRE(record_len <= avail);
996 
997 	hdr->tls_type = record_type;
998 	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
999 	hdr->tls_vminor = en->tls_vminor;
1000 	hdr->tls_length = htons(record_len - sizeof(*hdr));
1001 	iv = (char *)(hdr + 1);
1002 	for (i = 0; i < AES_BLOCK_LEN; i++)
1003 		iv[i] = rdigit();
1004 	debug_hexdump(tc, iv, AES_BLOCK_LEN, "explicit IV");
1005 
1006 	/* Copy plaintext to ciphertext region. */
1007 	memcpy(buf + hdr_len, src, len);
1008 
1009 	/* Compute HMAC. */
1010 	tls_mte_aad(en, len, hdr, seqno, &aad);
1011 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
1012 	debug_hexdump(tc, src, len, "plaintext");
1013 	ATF_REQUIRE(compute_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len,
1014 	    &aad, sizeof(aad), src, len, buf + hdr_len + len, &digest_len));
1015 	ATF_REQUIRE(digest_len == mac_len);
1016 
1017 	/* Store padding. */
1018 	for (i = 0; i < padding; i++)
1019 		buf[hdr_len + len + mac_len + i] = padding - 1;
1020 	debug_hexdump(tc, buf + hdr_len + len, mac_len + padding,
	    "MAC and padding");
1021 
1022 	/* Encrypt the record. */
1023 	ATF_REQUIRE(cbc_encrypt(tls_EVP_CIPHER(en), en->cipher_key, iv,
1024 	    buf + hdr_len, buf + hdr_len, len + mac_len + padding));
1025 	debug_hexdump(tc, dst, record_len, "encrypted record");
1026 
1027 	return (record_len);
1028 }
1029 
1030 static size_t
1031 encrypt_tls_12_aead(const atf_tc_t *tc, struct tls_enable *en,
1032     uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst)
1033 {
1034 	struct tls_record_layer *hdr;
1035 	struct tls_aead_data aad;
1036 	char nonce[12];
1037 	size_t hdr_len, mac_len, record_len;
1038 
1039 	hdr = dst;
1040 
1041 	debug(tc, "encrypting TLS 1.2 record seqno %ju:\n", (uintmax_t)seqno);
1042 	hdr_len = tls_header_len(en);
1043 	mac_len = tls_mac_len(en);
1044 	record_len = hdr_len + len + mac_len;
1045 
1046 	hdr->tls_type = record_type;
1047 	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
1048 	hdr->tls_vminor = TLS_MINOR_VER_TWO;
1049 	hdr->tls_length = htons(record_len - sizeof(*hdr));
1050 	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
1051 		memcpy(hdr + 1, &seqno, sizeof(seqno));
1052 
1053 	tls_12_aead_aad(en, len, hdr, seqno, &aad);
1054 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
1055 	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
1056 		tls_12_gcm_nonce(en, hdr, nonce);
1057 	else
1058 		tls_13_nonce(en, seqno, nonce);
1059 	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");
1060 
1061 	debug_hexdump(tc, src, len, "plaintext");
1062 	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
1063 	    &aad, sizeof(aad), src, (char *)dst + hdr_len, len,
1064 	    (char *)dst + hdr_len + len, mac_len));
1065 	debug_hexdump(tc, dst, record_len, "encrypted record");
1066 
1067 	return (record_len);
1068 }
1069 
1070 static size_t
1071 encrypt_tls_13_aead(const atf_tc_t *tc, struct tls_enable *en,
1072     uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
1073     size_t padding)
1074 {
1075 	struct tls_record_layer *hdr;
1076 	struct tls_aead_data_13 aad;
1077 	char nonce[12];
1078 	char *buf;
1079 	size_t hdr_len, mac_len, record_len;
1080 
1081 	hdr = dst;
1082 
1083 	debug(tc, "encrypting TLS 1.3 record seqno %ju:\n", (uintmax_t)seqno);
1084 	hdr_len = tls_header_len(en);
1085 	mac_len = tls_mac_len(en);
1086 	record_len = hdr_len + len + 1 + padding + mac_len;
1087 
1088 	hdr->tls_type = TLS_RLTYPE_APP;
1089 	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
1090 	hdr->tls_vminor = TLS_MINOR_VER_TWO;
1091 	hdr->tls_length = htons(record_len - sizeof(*hdr));
1092 
1093 	tls_13_aad(en, hdr, seqno, &aad);
1094 	debug_hexdump(tc, &aad, sizeof(aad), "aad");
1095 	tls_13_nonce(en, seqno, nonce);
1096 	debug_hexdump(tc, nonce, sizeof(nonce), "nonce");
1097 
1098 	/*
1099 	 * Use a temporary buffer for the input so that the record type
1100 	 * (and any padding) can be appended to the plaintext.
1101 	 */
1102 	buf = malloc(len + 1 + padding);
1103 	memcpy(buf, src, len);
1104 	buf[len] = record_type;
1105 	memset(buf + len + 1, 0, padding);
1106 	debug_hexdump(tc, buf, len + 1 + padding, "plaintext + type + padding");
1107 
1108 	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
1109 	    &aad, sizeof(aad), buf, (char *)dst + hdr_len, len + 1 + padding,
1110 	    (char *)dst + hdr_len + len + 1 + padding, mac_len));
1111 	debug_hexdump(tc, dst, record_len, "encrypted record");
1112 
1113 	free(buf);
1114 
1115 	return (record_len);
1116 }
1117 
1118 static size_t
1119 encrypt_tls_aead(const atf_tc_t *tc, struct tls_enable *en,
1120     uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
1121     size_t avail, size_t padding)
1122 {
1123 	size_t record_len;
1124 
1125 	record_len = tls_header_len(en) + len + padding + tls_trailer_len(en);
1126 	ATF_REQUIRE(record_len <= avail);
1127 
1128 	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
1129 		ATF_REQUIRE(padding == 0);
1130 		ATF_REQUIRE(encrypt_tls_12_aead(tc, en, record_type, seqno, src,
1131 		    len, dst) == record_len);
1132 	} else
1133 		ATF_REQUIRE(encrypt_tls_13_aead(tc, en, record_type, seqno, src,
1134 		    len, dst, padding) == record_len);
1135 
1136 	return (record_len);
1137 }
1138 
1139 static size_t
1140 encrypt_tls_record(const atf_tc_t *tc, struct tls_enable *en,
1141     uint8_t record_type, uint64_t seqno, const void *src, size_t len, void *dst,
1142     size_t avail, size_t padding)
1143 {
1144 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
1145 		return (encrypt_tls_aes_cbc_mte(tc, en, record_type, seqno, src,
1146 		    len, dst, avail, padding));
1147 	else
1148 		return (encrypt_tls_aead(tc, en, record_type, seqno, src, len,
1149 		    dst, avail, padding));
1150 }
1151 
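/*
 * Enable transmit kTLS on one end of a connection, write 'len' bytes
 * of application data, and verify that the records read from the peer
 * decrypt to the original plaintext.
 */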
1152 static void
1153 test_ktls_transmit_app_data(const atf_tc_t *tc, struct tls_enable *en,
1154     uint64_t seqno, size_t len)
1155 {
1156 	struct kevent ev;
1157 	struct tls_record_layer *hdr;
1158 	char *plaintext, *decrypted, *outbuf;
1159 	size_t decrypted_len, outbuf_len, outbuf_cap, record_len, written;
1160 	ssize_t rv;
1161 	int kq, sockets[2];
1162 	uint8_t record_type;
1163 
1164 	plaintext = alloc_buffer(len);
1165 	debug_hexdump(tc, plaintext, len, "plaintext");
1166 	decrypted = malloc(len);
1167 	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
1168 	    tls_trailer_len(en);
1169 	outbuf = malloc(outbuf_cap);
1170 	hdr = (struct tls_record_layer *)outbuf;
1171 
1172 	ATF_REQUIRE((kq = kqueue()) != -1);
1173 
1174 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1175 
1176 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
1177 	    sizeof(*en)) == 0);
1178 	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);
1179 
1180 	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
1181 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
1182 	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
1183 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
1184 
1185 	decrypted_len = 0;
1186 	outbuf_len = 0;
1187 	written = 0;
1188 
1189 	while (decrypted_len != len) {
1190 		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);
1191 
1192 		switch (ev.filter) {
1193 		case EVFILT_WRITE:
1194 			/* Try to write any remaining data. */
1195 			rv = write(ev.ident, plaintext + written,
1196 			    len - written);
1197 			ATF_REQUIRE_MSG(rv > 0,
1198 			    "failed to write to socket");
1199 			written += rv;
1200 			if (written == len) {
1201 				ev.flags = EV_DISABLE;
1202 				ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
1203 				    NULL) == 0);
1204 			}
1205 			break;
1206 
1207 		case EVFILT_READ:
1208 			ATF_REQUIRE((ev.flags & EV_EOF) == 0);
1209 
1210 			/*
1211 			 * Try to read data for the next TLS record
1212 			 * into outbuf.  Start by reading the header
1213 			 * to determine how much additional data to
1214 			 * read.
1215 			 */
1216 			if (outbuf_len < sizeof(struct tls_record_layer)) {
1217 				rv = read(ev.ident, outbuf + outbuf_len,
1218 				    sizeof(struct tls_record_layer) -
1219 				    outbuf_len);
1220 				ATF_REQUIRE_MSG(rv > 0,
1221 				    "failed to read from socket");
1222 				outbuf_len += rv;
1223 
1224 				if (outbuf_len ==
1225 				    sizeof(struct tls_record_layer)) {
1226 					debug(tc, "TLS header for seqno %ju:\n",
1227 					    (uintmax_t)seqno);
1228 					debug_hexdump(tc, outbuf, outbuf_len,
1229 					    NULL);
1230 				}
1231 			}
1232 
1233 			if (outbuf_len < sizeof(struct tls_record_layer))
1234 				break;
1235 
1236 			record_len = sizeof(struct tls_record_layer) +
1237 			    ntohs(hdr->tls_length);
1238 			debug(tc, "record_len %zu outbuf_cap %zu\n",
1239 			    record_len, outbuf_cap);
1240 			ATF_REQUIRE(record_len <= outbuf_cap);
1241 			ATF_REQUIRE(record_len > outbuf_len);
1242 			rv = read(ev.ident, outbuf + outbuf_len,
1243 			    record_len - outbuf_len);
1244 			if (rv == -1 && errno == EAGAIN)
1245 				break;
1246 			ATF_REQUIRE_MSG(rv > 0, "failed to read from socket");
1247 
1248 			outbuf_len += rv;
1249 			if (outbuf_len == record_len) {
1250 				decrypted_len += decrypt_tls_record(tc, en,
1251 				    seqno, outbuf, outbuf_len,
1252 				    decrypted + decrypted_len,
1253 				    len - decrypted_len, &record_type);
1254 				ATF_REQUIRE(record_type == TLS_RLTYPE_APP);
1255 
1256 				seqno++;
1257 				outbuf_len = 0;
1258 			}
1259 			break;
1260 		}
1261 	}
1262 
1263 	ATF_REQUIRE_MSG(written == decrypted_len,
1264 	    "read %zu decrypted bytes, but wrote %zu", decrypted_len, written);
1265 
1266 	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);
1267 
1268 	free(outbuf);
1269 	free(decrypted);
1270 	free(plaintext);
1271 
1272 	close_sockets(sockets);
1273 	ATF_REQUIRE(close(kq) == 0);
1274 }
1275 
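/*
 * Send 'len' bytes as a TLS record of the given record type using a
 * TLS_SET_RECORD_TYPE control message.
 */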
1276 static void
1277 ktls_send_control_message(int fd, uint8_t type, void *data, size_t len)
1278 {
1279 	struct msghdr msg;
1280 	struct cmsghdr *cmsg;
1281 	char cbuf[CMSG_SPACE(sizeof(type))];
1282 	struct iovec iov;
1283 
1284 	memset(&msg, 0, sizeof(msg));
1285 
1286 	msg.msg_control = cbuf;
1287 	msg.msg_controllen = sizeof(cbuf);
1288 	cmsg = CMSG_FIRSTHDR(&msg);
1289 	cmsg->cmsg_level = IPPROTO_TCP;
1290 	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
1291 	cmsg->cmsg_len = CMSG_LEN(sizeof(type));
1292 	*(uint8_t *)CMSG_DATA(cmsg) = type;
1293 
1294 	iov.iov_base = data;
1295 	iov.iov_len = len;
1296 	msg.msg_iov = &iov;
1297 	msg.msg_iovlen = 1;
1298 
1299 	ATF_REQUIRE(sendmsg(fd, &msg, 0) == (ssize_t)len);
1300 }
1301 
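/*
 * Transmit a single control (non-application-data) record over kTLS
 * and verify its decrypted contents and record type.
 */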
1302 static void
1303 test_ktls_transmit_control(const atf_tc_t *tc, struct tls_enable *en,
1304     uint64_t seqno, uint8_t type, size_t len)
1305 {
1306 	struct tls_record_layer *hdr;
1307 	char *plaintext, *decrypted, *outbuf;
1308 	size_t outbuf_cap, payload_len, record_len;
1309 	ssize_t rv;
1310 	int sockets[2];
1311 	uint8_t record_type;
1312 
1313 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1314 
1315 	plaintext = alloc_buffer(len);
1316 	decrypted = malloc(len);
1317 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1318 	outbuf = malloc(outbuf_cap);
1319 	hdr = (struct tls_record_layer *)outbuf;
1320 
1321 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1322 
1323 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
1324 	    sizeof(*en)) == 0);
1325 	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);
1326 
1327 	fd_set_blocking(sockets[0]);
1328 	fd_set_blocking(sockets[1]);
1329 
1330 	ktls_send_control_message(sockets[1], type, plaintext, len);
1331 
1332 	/*
1333 	 * First read the header to determine how much additional data
1334 	 * to read.
1335 	 */
1336 	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
1337 	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
1338 	payload_len = ntohs(hdr->tls_length);
1339 	record_len = payload_len + sizeof(struct tls_record_layer);
1340 	ATF_REQUIRE(record_len <= outbuf_cap);
1341 	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
1342 	    payload_len);
1343 	ATF_REQUIRE(rv == (ssize_t)payload_len);
1344 
1345 	rv = decrypt_tls_record(tc, en, seqno, outbuf, record_len, decrypted,
1346 	    len, &record_type);
1347 
1348 	ATF_REQUIRE_MSG((ssize_t)len == rv,
1349 	    "read %zd decrypted bytes, but wrote %zu", rv, len);
1350 	ATF_REQUIRE(record_type == type);
1351 
1352 	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);
1353 
1354 	free(outbuf);
1355 	free(decrypted);
1356 	free(plaintext);
1357 
1358 	close_sockets(sockets);
1359 }
1360 
1361 static void
1362 test_ktls_transmit_empty_fragment(const atf_tc_t *tc, struct tls_enable *en,
1363     uint64_t seqno)
1364 {
1365 	struct tls_record_layer *hdr;
1366 	char *outbuf;
1367 	size_t outbuf_cap, payload_len, record_len;
1368 	ssize_t rv;
1369 	int sockets[2];
1370 	uint8_t record_type;
1371 
1372 	outbuf_cap = tls_header_len(en) + tls_trailer_len(en);
1373 	outbuf = malloc(outbuf_cap);
1374 	hdr = (struct tls_record_layer *)outbuf;
1375 
1376 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1377 
1378 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
1379 	    sizeof(*en)) == 0);
1380 	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);
1381 
1382 	fd_set_blocking(sockets[0]);
1383 	fd_set_blocking(sockets[1]);
1384 
1385 	/*
1386 	 * A zero-length write should send an empty fragment only for
1387 	 * TLS 1.0 CBC suites; otherwise the write should fail with EINVAL.
1388 	 */
1389 	rv = write(sockets[1], NULL, 0);
1390 	if (rv == 0) {
1391 		ATF_REQUIRE(en->cipher_algorithm == CRYPTO_AES_CBC);
1392 		ATF_REQUIRE(en->tls_vminor == TLS_MINOR_VER_ZERO);
1393 	} else {
1394 		ATF_REQUIRE(rv == -1);
1395 		ATF_REQUIRE(errno == EINVAL);
1396 		goto out;
1397 	}
1398 
1399 	/*
1400 	 * First read the header to determine how much additional data
1401 	 * to read.
1402 	 */
1403 	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
1404 	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
1405 	payload_len = ntohs(hdr->tls_length);
1406 	record_len = payload_len + sizeof(struct tls_record_layer);
1407 	ATF_REQUIRE(record_len <= outbuf_cap);
1408 	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
1409 	    payload_len);
1410 	ATF_REQUIRE(rv == (ssize_t)payload_len);
1411 
1412 	rv = decrypt_tls_record(tc, en, seqno, outbuf, record_len, NULL, 0,
1413 	    &record_type);
1414 
1415 	ATF_REQUIRE_MSG(rv == 0,
1416 	    "read %zd decrypted bytes for an empty fragment", rv);
1417 	ATF_REQUIRE(record_type == TLS_RLTYPE_APP);
1418 
1419 out:
1420 	free(outbuf);
1421 
1422 	close_sockets(sockets);
1423 }
1424 
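/*
 * Receive a single TLS record on a receive kTLS socket and validate
 * the accompanying TLS_GET_RECORD control message.
 */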
1425 static size_t
1426 ktls_receive_tls_record(struct tls_enable *en, int fd, uint8_t record_type,
1427     void *data, size_t len)
1428 {
1429 	struct msghdr msg;
1430 	struct cmsghdr *cmsg;
1431 	struct tls_get_record *tgr;
1432 	char cbuf[CMSG_SPACE(sizeof(*tgr))];
1433 	struct iovec iov;
1434 	ssize_t rv;
1435 
1436 	memset(&msg, 0, sizeof(msg));
1437 
1438 	msg.msg_control = cbuf;
1439 	msg.msg_controllen = sizeof(cbuf);
1440 
1441 	iov.iov_base = data;
1442 	iov.iov_len = len;
1443 	msg.msg_iov = &iov;
1444 	msg.msg_iovlen = 1;
1445 
1446 	ATF_REQUIRE((rv = recvmsg(fd, &msg, 0)) > 0);
1447 
1448 	ATF_REQUIRE((msg.msg_flags & (MSG_EOR | MSG_CTRUNC)) == MSG_EOR);
1449 
1450 	cmsg = CMSG_FIRSTHDR(&msg);
1451 	ATF_REQUIRE(cmsg != NULL);
1452 	ATF_REQUIRE(cmsg->cmsg_level == IPPROTO_TCP);
1453 	ATF_REQUIRE(cmsg->cmsg_type == TLS_GET_RECORD);
1454 	ATF_REQUIRE(cmsg->cmsg_len == CMSG_LEN(sizeof(*tgr)));
1455 
1456 	tgr = (struct tls_get_record *)CMSG_DATA(cmsg);
1457 	ATF_REQUIRE(tgr->tls_type == record_type);
1458 	ATF_REQUIRE(tgr->tls_vmajor == en->tls_vmajor);
1459 	/* TLS 1.3 records report the legacy TLS 1.2 version on the wire. */
1460 	if (en->tls_vminor == TLS_MINOR_VER_THREE)
1461 		ATF_REQUIRE(tgr->tls_vminor == TLS_MINOR_VER_TWO);
1462 	else
1463 		ATF_REQUIRE(tgr->tls_vminor == en->tls_vminor);
1464 	ATF_REQUIRE(tgr->tls_length == htons(rv));
1465 
1466 	return (rv);
1467 }
1468 
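/*
 * Enable receive kTLS on one end of a connection, feed it encrypted
 * application-data records built in userland, and verify the decrypted
 * payload returned by the kernel.
 */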
1469 static void
1470 test_ktls_receive_app_data(const atf_tc_t *tc, struct tls_enable *en,
1471     uint64_t seqno, size_t len, size_t padding)
1472 {
1473 	struct kevent ev;
1474 	char *plaintext, *received, *outbuf;
1475 	size_t outbuf_cap, outbuf_len, outbuf_sent, received_len, todo, written;
1476 	ssize_t rv;
1477 	int kq, sockets[2];
1478 
1479 	plaintext = alloc_buffer(len);
1480 	received = malloc(len);
1481 	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
1482 	    tls_trailer_len(en);
1483 	outbuf = malloc(outbuf_cap);
1484 
1485 	ATF_REQUIRE((kq = kqueue()) != -1);
1486 
1487 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1488 
1489 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1490 	    sizeof(*en)) == 0);
1491 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1492 
1493 	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
1494 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
1495 	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
1496 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
1497 
1498 	received_len = 0;
1499 	outbuf_len = 0;
1500 	written = 0;
1501 
1502 	while (received_len != len) {
1503 		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);
1504 
1505 		switch (ev.filter) {
1506 		case EVFILT_WRITE:
1507 			/*
1508 			 * Compose the next TLS record to send.
1509 			 */
1510 			if (outbuf_len == 0) {
1511 				ATF_REQUIRE(written < len);
1512 				todo = len - written;
1513 				if (todo > TLS_MAX_MSG_SIZE_V10_2 - padding)
1514 					todo = TLS_MAX_MSG_SIZE_V10_2 - padding;
1515 				outbuf_len = encrypt_tls_record(tc, en,
1516 				    TLS_RLTYPE_APP, seqno, plaintext + written,
1517 				    todo, outbuf, outbuf_cap, padding);
1518 				outbuf_sent = 0;
1519 				written += todo;
1520 				seqno++;
1521 			}
1522 
1523 			/*
1524 			 * Try to write the remainder of the current
1525 			 * TLS record.
1526 			 */
1527 			rv = write(ev.ident, outbuf + outbuf_sent,
1528 			    outbuf_len - outbuf_sent);
1529 			ATF_REQUIRE_MSG(rv > 0,
1530 			    "failed to write to socket");
1531 			outbuf_sent += rv;
1532 			if (outbuf_sent == outbuf_len) {
1533 				outbuf_len = 0;
1534 				if (written == len) {
1535 					ev.flags = EV_DISABLE;
1536 					ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
1537 					    NULL) == 0);
1538 				}
1539 			}
1540 			break;
1541 
1542 		case EVFILT_READ:
1543 			ATF_REQUIRE((ev.flags & EV_EOF) == 0);
1544 
1545 			rv = ktls_receive_tls_record(en, ev.ident,
1546 			    TLS_RLTYPE_APP, received + received_len,
1547 			    len - received_len);
1548 			received_len += rv;
1549 			break;
1550 		}
1551 	}
1552 
1553 	ATF_REQUIRE_MSG(written == received_len,
1554 	    "read %zu decrypted bytes, but wrote %zu", received_len, written);
1555 
1556 	ATF_REQUIRE(memcmp(plaintext, received, len) == 0);
1557 
1558 	free(outbuf);
1559 	free(received);
1560 	free(plaintext);
1561 
1562 	close_sockets(sockets);
1563 	ATF_REQUIRE(close(kq) == 0);
1564 }
1565 
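/*
 * Require a receive on a kTLS socket to fail, optionally with the
 * given errno value.
 */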
1566 static void
1567 ktls_receive_tls_error(int fd, int expected_error)
1568 {
1569 	struct msghdr msg;
1570 	struct tls_get_record *tgr;
1571 	char cbuf[CMSG_SPACE(sizeof(*tgr))];
1572 	char buf[64];
1573 	struct iovec iov;
1574 
1575 	memset(&msg, 0, sizeof(msg));
1576 
1577 	msg.msg_control = cbuf;
1578 	msg.msg_controllen = sizeof(cbuf);
1579 
1580 	iov.iov_base = buf;
1581 	iov.iov_len = sizeof(buf);
1582 	msg.msg_iov = &iov;
1583 	msg.msg_iovlen = 1;
1584 
1585 	ATF_REQUIRE(recvmsg(fd, &msg, 0) == -1);
1586 	if (expected_error != 0)
1587 		ATF_REQUIRE(errno == expected_error);
1588 }
1589 
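/*
 * Encrypt a record, flip one byte at 'offset' (negative offsets are
 * relative to the end of the record), send it to a receive kTLS
 * socket, and expect the kernel to reject it with EBADMSG.
 */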
1590 static void
1591 test_ktls_receive_corrupted_record(const atf_tc_t *tc, struct tls_enable *en,
1592     uint64_t seqno, size_t len, ssize_t offset)
1593 {
1594 	char *plaintext, *outbuf;
1595 	size_t outbuf_cap, outbuf_len;
1596 	ssize_t rv;
1597 	int sockets[2];
1598 
1599 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1600 
1601 	plaintext = alloc_buffer(len);
1602 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1603 	outbuf = malloc(outbuf_cap);
1604 
1605 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1606 
1607 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1608 	    sizeof(*en)) == 0);
1609 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1610 
1611 	fd_set_blocking(sockets[0]);
1612 	fd_set_blocking(sockets[1]);
1613 
1614 	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
1615 	    plaintext, len, outbuf, outbuf_cap, 0);
1616 
1617 	/* A negative offset is an offset from the end. */
1618 	if (offset < 0)
1619 		offset += outbuf_len;
1620 	outbuf[offset] ^= 0x01;
1621 
1622 	rv = write(sockets[1], outbuf, outbuf_len);
1623 	ATF_REQUIRE(rv == (ssize_t)outbuf_len);
1624 
1625 	ktls_receive_tls_error(sockets[0], EBADMSG);
1626 
1627 	free(outbuf);
1628 	free(plaintext);
1629 
1630 	close_sockets(sockets);
1631 }
1632 
1633 static void
1634 test_ktls_receive_corrupted_iv(const atf_tc_t *tc, struct tls_enable *en,
1635     uint64_t seqno, size_t len)
1636 {
1637 	ATF_REQUIRE(tls_header_len(en) > sizeof(struct tls_record_layer));
1638 
1639 	/* Corrupt the first byte of the explicit IV after the header. */
1640 	test_ktls_receive_corrupted_record(tc, en, seqno, len,
1641 	    sizeof(struct tls_record_layer));
1642 }
1643 
1644 static void
1645 test_ktls_receive_corrupted_data(const atf_tc_t *tc, struct tls_enable *en,
1646     uint64_t seqno, size_t len)
1647 {
1648 	ATF_REQUIRE(len > 0);
1649 
1650 	/* Corrupt the first ciphertext byte after the header. */
1651 	test_ktls_receive_corrupted_record(tc, en, seqno, len,
1652 	    tls_header_len(en));
1653 }
1654 
1655 static void
1656 test_ktls_receive_corrupted_mac(const atf_tc_t *tc, struct tls_enable *en,
1657     uint64_t seqno, size_t len)
1658 {
1659 	size_t offset;
1660 
1661 	/* Corrupt the first byte of the MAC. */
1662 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
1663 		offset = tls_header_len(en) + len;
1664 	else
1665 		offset = -tls_mac_len(en);
1666 	test_ktls_receive_corrupted_record(tc, en, seqno, len, offset);
1667 }
1668 
1669 static void
1670 test_ktls_receive_corrupted_padding(const atf_tc_t *tc, struct tls_enable *en,
1671     uint64_t seqno, size_t len)
1672 {
1673 	ATF_REQUIRE(en->cipher_algorithm == CRYPTO_AES_CBC);
1674 
1675 	/* Corrupt the last byte of the padding. */
1676 	test_ktls_receive_corrupted_record(tc, en, seqno, len, -1);
1677 }
1678 
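/*
 * Send only the first half of an encrypted record and shut down the
 * sender; the receiver should fail with EMSGSIZE.
 */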
1679 static void
1680 test_ktls_receive_truncated_record(const atf_tc_t *tc, struct tls_enable *en,
1681     uint64_t seqno, size_t len)
1682 {
1683 	char *plaintext, *outbuf;
1684 	size_t outbuf_cap, outbuf_len;
1685 	ssize_t rv;
1686 	int sockets[2];
1687 
1688 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1689 
1690 	plaintext = alloc_buffer(len);
1691 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1692 	outbuf = malloc(outbuf_cap);
1693 
1694 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1695 
1696 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1697 	    sizeof(*en)) == 0);
1698 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1699 
1700 	fd_set_blocking(sockets[0]);
1701 	fd_set_blocking(sockets[1]);
1702 
1703 	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
1704 	    plaintext, len, outbuf, outbuf_cap, 0);
1705 
1706 	rv = write(sockets[1], outbuf, outbuf_len / 2);
1707 	ATF_REQUIRE(rv == (ssize_t)(outbuf_len / 2));
1708 
1709 	ATF_REQUIRE(shutdown(sockets[1], SHUT_WR) == 0);
1710 
1711 	ktls_receive_tls_error(sockets[0], EMSGSIZE);
1712 
1713 	free(outbuf);
1714 	free(plaintext);
1715 
1716 	close_sockets(sockets);
1717 }
1718 
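/*
 * The following tests send records with corrupted header fields
 * (version, type, length) and expect the kernel to reject them.
 */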
1719 static void
1720 test_ktls_receive_bad_major(const atf_tc_t *tc, struct tls_enable *en,
1721     uint64_t seqno, size_t len)
1722 {
1723 	struct tls_record_layer *hdr;
1724 	char *plaintext, *outbuf;
1725 	size_t outbuf_cap, outbuf_len;
1726 	ssize_t rv;
1727 	int sockets[2];
1728 
1729 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1730 
1731 	plaintext = alloc_buffer(len);
1732 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1733 	outbuf = malloc(outbuf_cap);
1734 
1735 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1736 
1737 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1738 	    sizeof(*en)) == 0);
1739 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1740 
1741 	fd_set_blocking(sockets[0]);
1742 	fd_set_blocking(sockets[1]);
1743 
1744 	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
1745 	    plaintext, len, outbuf, outbuf_cap, 0);
1746 
1747 	hdr = (void *)outbuf;
1748 	hdr->tls_vmajor++;
1749 
1750 	rv = write(sockets[1], outbuf, outbuf_len);
1751 	ATF_REQUIRE(rv == (ssize_t)outbuf_len);
1752 
1753 	ktls_receive_tls_error(sockets[0], EINVAL);
1754 
1755 	free(outbuf);
1756 	free(plaintext);
1757 
1758 	close_sockets(sockets);
1759 }
1760 
1761 static void
1762 test_ktls_receive_bad_minor(const atf_tc_t *tc, struct tls_enable *en,
1763     uint64_t seqno, size_t len)
1764 {
1765 	struct tls_record_layer *hdr;
1766 	char *plaintext, *outbuf;
1767 	size_t outbuf_cap, outbuf_len;
1768 	ssize_t rv;
1769 	int sockets[2];
1770 
1771 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1772 
1773 	plaintext = alloc_buffer(len);
1774 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1775 	outbuf = malloc(outbuf_cap);
1776 
1777 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1778 
1779 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1780 	    sizeof(*en)) == 0);
1781 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1782 
1783 	fd_set_blocking(sockets[0]);
1784 	fd_set_blocking(sockets[1]);
1785 
1786 	outbuf_len = encrypt_tls_record(tc, en, TLS_RLTYPE_APP, seqno,
1787 	    plaintext, len, outbuf, outbuf_cap, 0);
1788 
1789 	hdr = (void *)outbuf;
1790 	hdr->tls_vminor++;
1791 
1792 	rv = write(sockets[1], outbuf, outbuf_len);
1793 	ATF_REQUIRE(rv == (ssize_t)outbuf_len);
1794 
1795 	ktls_receive_tls_error(sockets[0], EINVAL);
1796 
1797 	free(outbuf);
1798 	free(plaintext);
1799 
1800 	close_sockets(sockets);
1801 }
1802 
1803 static void
1804 test_ktls_receive_bad_type(const atf_tc_t *tc, struct tls_enable *en,
1805     uint64_t seqno, size_t len)
1806 {
1807 	struct tls_record_layer *hdr;
1808 	char *plaintext, *outbuf;
1809 	size_t outbuf_cap, outbuf_len;
1810 	ssize_t rv;
1811 	int sockets[2];
1812 
1813 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1814 	ATF_REQUIRE(en->tls_vminor == TLS_MINOR_VER_THREE);
1815 
1816 	plaintext = alloc_buffer(len);
1817 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1818 	outbuf = malloc(outbuf_cap);
1819 
1820 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1821 
1822 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1823 	    sizeof(*en)) == 0);
1824 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1825 
1826 	fd_set_blocking(sockets[0]);
1827 	fd_set_blocking(sockets[1]);
1828 
1829 	outbuf_len = encrypt_tls_record(tc, en, 0x21 /* Alert */, seqno,
1830 	    plaintext, len, outbuf, outbuf_cap, 0);
1831 
1832 	hdr = (void *)outbuf;
1833 	hdr->tls_type = TLS_RLTYPE_APP + 1;
1834 
1835 	rv = write(sockets[1], outbuf, outbuf_len);
1836 	ATF_REQUIRE(rv == (ssize_t)outbuf_len);
1837 
1838 	ktls_receive_tls_error(sockets[0], EINVAL);
1839 
1840 	free(outbuf);
1841 	free(plaintext);
1842 
1843 	close_sockets(sockets);
1844 }
1845 
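/*
 * Write a record whose header advertises a payload length that is
 * invalid for the cipher suite (smaller than the minimum record
 * payload, larger than the maximum record size, or not a multiple of
 * the cipher block size, depending on the caller).  The record body is
 * left as zeroes; the read should fail with EMSGSIZE.
 */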
1846 static void
1847 test_ktls_receive_bad_size(const atf_tc_t *tc, struct tls_enable *en,
1848     uint64_t seqno, size_t len)
1849 {
1850 	struct tls_record_layer *hdr;
1851 	char *outbuf;
1852 	size_t outbuf_len;
1853 	ssize_t rv;
1854 	int sockets[2];
1855 
1856 	outbuf_len = sizeof(*hdr) + len;
1857 	outbuf = calloc(1, outbuf_len);
1858 
1859 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
1860 
1861 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1862 	    sizeof(*en)) == 0);
1863 	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);
1864 
1865 	fd_set_blocking(sockets[0]);
1866 	fd_set_blocking(sockets[1]);
1867 
1868 	hdr = (void *)outbuf;
1869 	hdr->tls_vmajor = en->tls_vmajor;
1870 	if (en->tls_vminor == TLS_MINOR_VER_THREE)
1871 		hdr->tls_vminor = TLS_MINOR_VER_TWO;
1872 	else
1873 		hdr->tls_vminor = en->tls_vminor;
1874 	hdr->tls_type = TLS_RLTYPE_APP;
1875 	hdr->tls_length = htons(len);
1876 
1877 	rv = write(sockets[1], outbuf, outbuf_len);
1878 	ATF_REQUIRE(rv == (ssize_t)outbuf_len);
1879 
1880 	ATF_REQUIRE(shutdown(sockets[1], SHUT_WR) == 0);
1881 
1882 	ktls_receive_tls_error(sockets[0], EMSGSIZE);
1883 
1884 	free(outbuf);
1885 
1886 	close_sockets(sockets);
1887 }
1888 
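/*
 * Lists of cipher suites used to instantiate the test macros below.
 * Each M() entry supplies a test name component, the cryptodev cipher
 * algorithm and key size (in bytes), the MAC algorithm (0 for AEAD
 * suites), and the TLS minor version.
 */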
1889 #define	TLS_10_TESTS(M)							\
1890 	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8,			\
1891 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO)			\
1892 	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8,			\
1893 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO)
1894 
1895 #define	TLS_13_TESTS(M)							\
1896 	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
1897 	    TLS_MINOR_VER_THREE)					\
1898 	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
1899 	    TLS_MINOR_VER_THREE)					\
1900 	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
1901 	    TLS_MINOR_VER_THREE)
1902 
1903 #define	AES_CBC_NONZERO_TESTS(M)					\
1904 	M(aes128_cbc_1_1_sha1, CRYPTO_AES_CBC, 128 / 8,			\
1905 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE)			\
1906 	M(aes256_cbc_1_1_sha1, CRYPTO_AES_CBC, 256 / 8,			\
1907 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE)			\
1908 	M(aes128_cbc_1_2_sha1, CRYPTO_AES_CBC, 128 / 8,			\
1909 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO)			\
1910 	M(aes256_cbc_1_2_sha1, CRYPTO_AES_CBC, 256 / 8,			\
1911 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO)			\
1912 	M(aes128_cbc_1_2_sha256, CRYPTO_AES_CBC, 128 / 8,		\
1913 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO)			\
1914 	M(aes256_cbc_1_2_sha256, CRYPTO_AES_CBC, 256 / 8,		\
1915 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO)			\
1916 	M(aes128_cbc_1_2_sha384, CRYPTO_AES_CBC, 128 / 8,		\
1917 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO)			\
1918 	M(aes256_cbc_1_2_sha384, CRYPTO_AES_CBC, 256 / 8,		\
1919 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO)
1920 
1921 #define	AES_CBC_TESTS(M)						\
1922 	TLS_10_TESTS(M)							\
1923 	AES_CBC_NONZERO_TESTS(M)
1924 
1925 #define AES_GCM_12_TESTS(M)						\
1926 	M(aes128_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
1927 	    TLS_MINOR_VER_TWO)						\
1928 	M(aes256_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
1929 	    TLS_MINOR_VER_TWO)
1930 
1931 #define AES_GCM_TESTS(M)						\
1932 	AES_GCM_12_TESTS(M)						\
1933 	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
1934 	    TLS_MINOR_VER_THREE)					\
1935 	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
1936 	    TLS_MINOR_VER_THREE)
1937 
1938 #define CHACHA20_TESTS(M)						\
1939 	M(chacha20_poly1305_1_2, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
1940 	    TLS_MINOR_VER_TWO)						\
1941 	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
1942 	    TLS_MINOR_VER_THREE)
1943 
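/*
 * The GEN_* macros below expand to ATF test cases which build a
 * struct tls_enable for a given cipher suite using a random starting
 * sequence number and then invoke one of the test_ktls_* helpers; the
 * matching ADD_* macros register the generated test cases in
 * ATF_TP_ADD_TCS().  For example,
 * GEN_TRANSMIT_APP_DATA_TEST(aes128_cbc_1_0_sha1, ..., short, 64)
 * defines a test case named ktls_transmit_aes128_cbc_1_0_sha1_short.
 */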
1944 #define GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1945 	    auth_alg, minor, name, len)					\
1946 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name);		\
1947 ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc)			\
1948 {									\
1949 	struct tls_enable en;						\
1950 	uint64_t seqno;							\
1951 									\
1952 	ATF_REQUIRE_KTLS();						\
1953 	seqno = random();						\
1954 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
1955 	    seqno, &en);						\
1956 	test_ktls_transmit_app_data(tc, &en, seqno, len);		\
1957 	free_tls_enable(&en);						\
1958 }
1959 
1960 #define ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1961 	    auth_alg, minor, name)					\
1962 	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);
1963 
1964 #define GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1965 	    auth_alg, minor, name, type, len)				\
1966 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name);		\
1967 ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc)			\
1968 {									\
1969 	struct tls_enable en;						\
1970 	uint64_t seqno;							\
1971 									\
1972 	ATF_REQUIRE_KTLS();						\
1973 	seqno = random();						\
1974 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
1975 	    seqno, &en);						\
1976 	test_ktls_transmit_control(tc, &en, seqno, type, len);		\
1977 	free_tls_enable(&en);						\
1978 }
1979 
1980 #define ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1981 	    auth_alg, minor, name)					\
1982 	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);
1983 
1984 #define GEN_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg,	\
1985 	    key_size, auth_alg, minor)					\
1986 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_empty_fragment);	\
1987 ATF_TC_BODY(ktls_transmit_##cipher_name##_empty_fragment, tc)		\
1988 {									\
1989 	struct tls_enable en;						\
1990 	uint64_t seqno;							\
1991 									\
1992 	ATF_REQUIRE_KTLS();						\
1993 	seqno = random();						\
1994 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
1995 	    seqno, &en);						\
1996 	test_ktls_transmit_empty_fragment(tc, &en, seqno);		\
1997 	free_tls_enable(&en);						\
1998 }
1999 
2000 #define ADD_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg,	\
2001 	    key_size, auth_alg, minor)					\
2002 	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_empty_fragment);
2003 
2004 #define GEN_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
2005 	    minor)							\
2006 	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2007 	    auth_alg, minor, short, 64)					\
2008 	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2009 	    auth_alg, minor, long, 64 * 1024)				\
2010 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2011 	    auth_alg, minor, control, 0x21 /* Alert */, 32)
2012 
2013 #define ADD_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
2014 	    minor)							\
2015 	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2016 	    auth_alg, minor, short)					\
2017 	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2018 	    auth_alg, minor, long)					\
2019 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2020 	    auth_alg, minor, control)
2021 
2022 /*
2023  * For each supported cipher suite, run three transmit tests:
2024  *
2025  * - a short test which sends 64 bytes of application data (likely as
2026  *   a single TLS record)
2027  *
2028  * - a long test which sends 64KB of application data (split across
2029  *   multiple TLS records)
2030  *
2031  * - a control test which sends a single record with a specific
2032  *   content type via sendmsg()
2033  */
2034 AES_CBC_TESTS(GEN_TRANSMIT_TESTS);
2035 AES_GCM_TESTS(GEN_TRANSMIT_TESTS);
2036 CHACHA20_TESTS(GEN_TRANSMIT_TESTS);
2037 
2038 #define GEN_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size,	\
2039 	    auth_alg, minor)						\
2040 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2041 	    auth_alg, minor, padding_1, 0x21 /* Alert */, 1)		\
2042 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2043 	    auth_alg, minor, padding_2, 0x21 /* Alert */, 2)		\
2044 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2045 	    auth_alg, minor, padding_3, 0x21 /* Alert */, 3)		\
2046 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2047 	    auth_alg, minor, padding_4, 0x21 /* Alert */, 4)		\
2048 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2049 	    auth_alg, minor, padding_5, 0x21 /* Alert */, 5)		\
2050 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2051 	    auth_alg, minor, padding_6, 0x21 /* Alert */, 6)		\
2052 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2053 	    auth_alg, minor, padding_7, 0x21 /* Alert */, 7)		\
2054 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2055 	    auth_alg, minor, padding_8, 0x21 /* Alert */, 8)		\
2056 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2057 	    auth_alg, minor, padding_9, 0x21 /* Alert */, 9)		\
2058 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2059 	    auth_alg, minor, padding_10, 0x21 /* Alert */, 10)		\
2060 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2061 	    auth_alg, minor, padding_11, 0x21 /* Alert */, 11)		\
2062 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2063 	    auth_alg, minor, padding_12, 0x21 /* Alert */, 12)		\
2064 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2065 	    auth_alg, minor, padding_13, 0x21 /* Alert */, 13)		\
2066 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2067 	    auth_alg, minor, padding_14, 0x21 /* Alert */, 14)		\
2068 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2069 	    auth_alg, minor, padding_15, 0x21 /* Alert */, 15)		\
2070 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2071 	    auth_alg, minor, padding_16, 0x21 /* Alert */, 16)
2072 
2073 #define ADD_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size,	\
2074 	    auth_alg, minor)						\
2075 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2076 	    auth_alg, minor, padding_1)					\
2077 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2078 	    auth_alg, minor, padding_2)					\
2079 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2080 	    auth_alg, minor, padding_3)					\
2081 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2082 	    auth_alg, minor, padding_4)					\
2083 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2084 	    auth_alg, minor, padding_5)					\
2085 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2086 	    auth_alg, minor, padding_6)					\
2087 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2088 	    auth_alg, minor, padding_7)					\
2089 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2090 	    auth_alg, minor, padding_8)					\
2091 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2092 	    auth_alg, minor, padding_9)					\
2093 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2094 	    auth_alg, minor, padding_10)				\
2095 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2096 	    auth_alg, minor, padding_11)				\
2097 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2098 	    auth_alg, minor, padding_12)				\
2099 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2100 	    auth_alg, minor, padding_13)				\
2101 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2102 	    auth_alg, minor, padding_14)				\
2103 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2104 	    auth_alg, minor, padding_15)				\
2105 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
2106 	    auth_alg, minor, padding_16)
2107 
2108 /*
2109  * For AES-CBC MTE cipher suites using padding, add tests of messages
2110  * with each possible padding size.  Note that the padding_<N> tests
2111  * do not necessarily test <N> bytes of padding as the padding is a
2112  * function of the cipher suite's MAC length.  However, cycling
2113  * through all of the payload sizes from 1 to 16 should exercise all
2114  * of the possible padding lengths for each suite.
2115  */
2116 AES_CBC_TESTS(GEN_TRANSMIT_PADDING_TESTS);
2117 
2118 /*
2119  * Test "empty fragments" which are TLS records with no payload that
2120  * OpenSSL can send for TLS 1.0 connections.
2121  */
2122 AES_CBC_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
2123 AES_GCM_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
2124 CHACHA20_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
2125 
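/*
 * Enabling transmit offload with an invalid cipher suite, MAC, or TLS
 * version combination should fail with EINVAL.
 */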
2126 static void
2127 test_ktls_invalid_transmit_cipher_suite(const atf_tc_t *tc,
2128     struct tls_enable *en)
2129 {
2130 	int sockets[2];
2131 
2132 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
2133 
2134 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
2135 	    sizeof(*en)) == -1);
2136 	ATF_REQUIRE(errno == EINVAL);
2137 
2138 	close_sockets(sockets);
2139 }
2140 
2141 #define GEN_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg,	\
2142 	    minor)							\
2143 ATF_TC_WITHOUT_HEAD(ktls_transmit_invalid_##name);			\
2144 ATF_TC_BODY(ktls_transmit_invalid_##name, tc)				\
2145 {									\
2146 	struct tls_enable en;						\
2147 	uint64_t seqno;							\
2148 									\
2149 	ATF_REQUIRE_KTLS();						\
2150 	seqno = random();						\
2151 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2152 	    seqno, &en);						\
2153 	test_ktls_invalid_transmit_cipher_suite(tc, &en);		\
2154 	free_tls_enable(&en);						\
2155 }
2156 
2157 #define ADD_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \
2158 	    minor)							\
2159 	ATF_TP_ADD_TC(tp, ktls_transmit_invalid_##name);
2160 
2161 #define	INVALID_CIPHER_SUITES(M)					\
2162 	M(aes128_cbc_1_0_sha256, CRYPTO_AES_CBC, 128 / 8,		\
2163 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ZERO)			\
2164 	M(aes128_cbc_1_0_sha384, CRYPTO_AES_CBC, 128 / 8,		\
2165 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ZERO)			\
2166 	M(aes128_gcm_1_0, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
2167 	    TLS_MINOR_VER_ZERO)						\
2168 	M(chacha20_poly1305_1_0, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
2169 	    TLS_MINOR_VER_ZERO)						\
2170 	M(aes128_cbc_1_1_sha256, CRYPTO_AES_CBC, 128 / 8,		\
2171 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ONE)			\
2172 	M(aes128_cbc_1_1_sha384, CRYPTO_AES_CBC, 128 / 8,		\
2173 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ONE)			\
2174 	M(aes128_gcm_1_1, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
2175 	    TLS_MINOR_VER_ONE)						\
2176 	M(chacha20_poly1305_1_1, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
2177 	    TLS_MINOR_VER_ONE)						\
2178 	M(aes128_cbc_1_3_sha1, CRYPTO_AES_CBC, 128 / 8,			\
2179 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_THREE)			\
2180 	M(aes128_cbc_1_3_sha256, CRYPTO_AES_CBC, 128 / 8,		\
2181 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_THREE)			\
2182 	M(aes128_cbc_1_3_sha384, CRYPTO_AES_CBC, 128 / 8,		\
2183 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_THREE)
2184 
2185 /*
2186  * Ensure that invalid cipher suites are rejected for transmit.
2187  */
2188 INVALID_CIPHER_SUITES(GEN_INVALID_TRANSMIT_TEST);
2189 
2190 #define GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2191 	    auth_alg, minor, name, len, padding)			\
2192 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name);		\
2193 ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc)			\
2194 {									\
2195 	struct tls_enable en;						\
2196 	uint64_t seqno;							\
2197 									\
2198 	ATF_REQUIRE_KTLS();						\
2199 	seqno = random();						\
2200 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2201 	    seqno, &en);						\
2202 	test_ktls_receive_app_data(tc, &en, seqno, len, padding);	\
2203 	free_tls_enable(&en);						\
2204 }
2205 
2206 #define ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2207 	    auth_alg, minor, name)					\
2208 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name);
2209 
2210 #define GEN_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2211 	    auth_alg, minor, len)					\
2212 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_data);		\
2213 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_data, tc)			\
2214 {									\
2215 	struct tls_enable en;						\
2216 	uint64_t seqno;							\
2217 									\
2218 	ATF_REQUIRE_KTLS();						\
2219 	seqno = random();						\
2220 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2221 	    seqno, &en);						\
2222 	test_ktls_receive_corrupted_data(tc, &en, seqno, len);		\
2223 	free_tls_enable(&en);						\
2224 }
2225 
2226 #define ADD_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2227 	    auth_alg, minor)						\
2228 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_data);
2229 
2230 #define GEN_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size,	\
2231 	    auth_alg, minor, len)					\
2232 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_mac);		\
2233 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_mac, tc)			\
2234 {									\
2235 	struct tls_enable en;						\
2236 	uint64_t seqno;							\
2237 									\
2238 	ATF_REQUIRE_KTLS();						\
2239 	seqno = random();						\
2240 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2241 	    seqno, &en);						\
2242 	test_ktls_receive_corrupted_mac(tc, &en, seqno, len);		\
2243 	free_tls_enable(&en);						\
2244 }
2245 
2246 #define ADD_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size,	\
2247 	    auth_alg, minor)						\
2248 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_mac);
2249 
2250 #define GEN_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size,	\
2251 	    auth_alg, minor, len)					\
2252 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_truncated_record);	\
2253 ATF_TC_BODY(ktls_receive_##cipher_name##_truncated_record, tc)		\
2254 {									\
2255 	struct tls_enable en;						\
2256 	uint64_t seqno;							\
2257 									\
2258 	ATF_REQUIRE_KTLS();						\
2259 	seqno = random();						\
2260 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2261 	    seqno, &en);						\
2262 	test_ktls_receive_truncated_record(tc, &en, seqno, len);	\
2263 	free_tls_enable(&en);						\
2264 }
2265 
2266 #define ADD_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size,	\
2267 	    auth_alg, minor)						\
2268 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_truncated_record);
2269 
2270 #define GEN_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size,	\
2271 	    auth_alg, minor, len)					\
2272 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_major);		\
2273 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_major, tc)			\
2274 {									\
2275 	struct tls_enable en;						\
2276 	uint64_t seqno;							\
2277 									\
2278 	ATF_REQUIRE_KTLS();						\
2279 	seqno = random();						\
2280 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2281 	    seqno, &en);						\
2282 	test_ktls_receive_bad_major(tc, &en, seqno, len);		\
2283 	free_tls_enable(&en);						\
2284 }
2285 
2286 #define ADD_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size,	\
2287 	    auth_alg, minor)						\
2288 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_major);
2289 
2290 #define GEN_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size,	\
2291 	    auth_alg, minor, len)					\
2292 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_minor);		\
2293 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_minor, tc)			\
2294 {									\
2295 	struct tls_enable en;						\
2296 	uint64_t seqno;							\
2297 									\
2298 	ATF_REQUIRE_KTLS();						\
2299 	seqno = random();						\
2300 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2301 	    seqno, &en);						\
2302 	test_ktls_receive_bad_minor(tc, &en, seqno, len);		\
2303 	free_tls_enable(&en);						\
2304 }
2305 
2306 #define ADD_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size,	\
2307 	    auth_alg, minor)						\
2308 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_minor);
2309 
2310 #define GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2311 	    auth_alg, minor, name, len)					\
2312 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name);		\
2313 ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc)			\
2314 {									\
2315 	struct tls_enable en;						\
2316 	uint64_t seqno;							\
2317 									\
2318 	ATF_REQUIRE_KTLS();						\
2319 	seqno = random();						\
2320 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2321 	    seqno, &en);						\
2322 	test_ktls_receive_bad_size(tc, &en, seqno, (len));		\
2323 	free_tls_enable(&en);						\
2324 }
2325 
2326 #define ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2327 	    auth_alg, minor, name)					\
2328 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name);
2329 
2330 #define GEN_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
2331 	    minor)							\
2332 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2333 	    auth_alg, minor, short, 64, 0)				\
2334 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2335 	    auth_alg, minor, long, 64 * 1024, 0)			\
2336 	GEN_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2337 	    auth_alg, minor, 64)					\
2338 	GEN_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size,	\
2339 	    auth_alg, minor, 64)					\
2340 	GEN_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size,	\
2341 	    auth_alg, minor, 64)					\
2342 	GEN_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size,	\
2343 	    auth_alg, minor, 64)					\
2344 	GEN_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size,	\
2345 	    auth_alg, minor, 64)					\
2346 	GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2347 	    auth_alg, minor, small_record,				\
2348 	    tls_minimum_record_payload(&en) - 1)			\
2349 	GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2350 	    auth_alg, minor, oversized_record,				\
2351 	    TLS_MAX_MSG_SIZE_V10_2 * 2)
2352 
2353 #define ADD_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
2354 	    minor)							\
2355 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2356 	    auth_alg, minor, short)					\
2357 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2358 	    auth_alg, minor, long)					\
2359 	ADD_RECEIVE_BAD_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2360 	    auth_alg, minor)						\
2361 	ADD_RECEIVE_BAD_MAC_TEST(cipher_name, cipher_alg, key_size,	\
2362 	    auth_alg, minor)						\
2363 	ADD_RECEIVE_TRUNCATED_TEST(cipher_name, cipher_alg, key_size,	\
2364 	    auth_alg, minor)						\
2365 	ADD_RECEIVE_BAD_MAJOR_TEST(cipher_name, cipher_alg, key_size,	\
2366 	    auth_alg, minor)						\
2367 	ADD_RECEIVE_BAD_MINOR_TEST(cipher_name, cipher_alg, key_size,	\
2368 	    auth_alg, minor)						\
2369 	ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2370 	    auth_alg, minor, small_record)				\
2371 	ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2372 	    auth_alg, minor, oversized_record)
2373 
2374 /*
2375  * For each supported cipher suite, run several receive tests:
2376  *
2377  * - a short test which sends 64 bytes of application data (likely as
2378  *   a single TLS record)
2379  *
2380  * - a long test which sends 64KB of application data (split across
2381  *   multiple TLS records)
2382  *
2383  * - a test with corrupted payload data in a single TLS record
2384  *
2385  * - a test with a corrupted MAC in a single TLS record
2386  *
2387  * - a test with a truncated TLS record
2388  *
2389  * - tests with invalid TLS major and minor versions
2390  *
2391  * - a test with a record whose payload is one byte smaller than the
2392  *   smallest valid record size
2393  *
2394  * - a test with an oversized TLS record
2395  */
2396 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_TESTS);
2397 AES_GCM_TESTS(GEN_RECEIVE_TESTS);
2398 CHACHA20_TESTS(GEN_RECEIVE_TESTS);
2399 
2400 #define	GEN_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg,		\
2401 	    key_size, auth_alg, minor)					\
2402 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2403 	    auth_alg, minor, padding_1, 1, 0)				\
2404 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2405 	    auth_alg, minor, padding_2, 2, 0)				\
2406 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2407 	    auth_alg, minor, padding_3, 3, 0)				\
2408 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2409 	    auth_alg, minor, padding_4, 4, 0)				\
2410 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2411 	    auth_alg, minor, padding_5, 5, 0)				\
2412 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2413 	    auth_alg, minor, padding_6, 6, 0)				\
2414 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2415 	    auth_alg, minor, padding_7, 7, 0)				\
2416 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2417 	    auth_alg, minor, padding_8, 8, 0)				\
2418 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2419 	    auth_alg, minor, padding_9, 9, 0)				\
2420 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2421 	    auth_alg, minor, padding_10, 10, 0)				\
2422 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2423 	    auth_alg, minor, padding_11, 11, 0)				\
2424 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2425 	    auth_alg, minor, padding_12, 12, 0)				\
2426 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2427 	    auth_alg, minor, padding_13, 13, 0)				\
2428 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2429 	    auth_alg, minor, padding_14, 14, 0)				\
2430 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2431 	    auth_alg, minor, padding_15, 15, 0)				\
2432 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2433 	    auth_alg, minor, padding_16, 16, 0)				\
2434 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2435 	    auth_alg, minor, padding_16_extra, 16, 16)			\
2436 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2437 	    auth_alg, minor, padding_32_extra, 16, 32)
2438 
2439 #define ADD_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg,		\
2440 	    key_size, auth_alg, minor)					\
2441 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2442 	    auth_alg, minor, padding_1)					\
2443 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2444 	    auth_alg, minor, padding_2)					\
2445 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2446 	    auth_alg, minor, padding_3)					\
2447 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2448 	    auth_alg, minor, padding_4)					\
2449 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2450 	    auth_alg, minor, padding_5)					\
2451 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2452 	    auth_alg, minor, padding_6)					\
2453 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2454 	    auth_alg, minor, padding_7)					\
2455 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2456 	    auth_alg, minor, padding_8)					\
2457 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2458 	    auth_alg, minor, padding_9)					\
2459 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2460 	    auth_alg, minor, padding_10)				\
2461 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2462 	    auth_alg, minor, padding_11)				\
2463 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2464 	    auth_alg, minor, padding_12)				\
2465 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2466 	    auth_alg, minor, padding_13)				\
2467 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2468 	    auth_alg, minor, padding_14)				\
2469 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2470 	    auth_alg, minor, padding_15)				\
2471 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2472 	    auth_alg, minor, padding_16)				\
2473 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2474 	    auth_alg, minor, padding_16_extra)				\
2475 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2476 	    auth_alg, minor, padding_32_extra)
2477 
2478 #define GEN_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size,	\
2479 	    auth_alg, minor, len)					\
2480 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_padding);		\
2481 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_padding, tc)		\
2482 {									\
2483 	struct tls_enable en;						\
2484 	uint64_t seqno;							\
2485 									\
2486 	ATF_REQUIRE_KTLS();						\
2487 	seqno = random();						\
2488 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2489 	    seqno, &en);						\
2490 	test_ktls_receive_corrupted_padding(tc, &en, seqno, len);	\
2491 	free_tls_enable(&en);						\
2492 }
2493 
2494 #define ADD_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size,	\
2495 	    auth_alg, minor)						\
2496 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_padding);
2497 
2498 #define	GEN_RECEIVE_MTE_TESTS(cipher_name, cipher_alg, key_size,	\
2499 	    auth_alg, minor)						\
2500 	GEN_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg,		\
2501 	    key_size, auth_alg, minor)					\
2502 	GEN_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size,	\
2503 	    auth_alg, minor, 64)					\
2504 	GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2505 	    auth_alg, minor, non_block_size,				\
2506 	    tls_minimum_record_payload(&en) + 1)
2507 
2508 #define	ADD_RECEIVE_MTE_TESTS(cipher_name, cipher_alg, key_size,	\
2509 	    auth_alg, minor)						\
2510 	ADD_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg,		\
2511 	    key_size, auth_alg, minor)					\
2512 	ADD_RECEIVE_BAD_PADDING_TEST(cipher_name, cipher_alg, key_size,	\
2513 	    auth_alg, minor)						\
2514 	ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2515 	    auth_alg, minor, non_block_size)
2516 
2517 /*
2518  * For AES-CBC MTE cipher suites using padding, add tests of messages
2519  * with each possible padding size.  Note that the padding_<N> tests
2520  * do not necessarily test <N> bytes of padding as the padding is a
2521  * function of the cipher suite's MAC length.  However, cycling
2522  * through all of the payload sizes from 1 to 16 should exercise all
2523  * of the possible padding lengths for each suite.
2524  *
2525  * Two additional tests send records padded with an extra 16 or 32
2526  * bytes beyond the minimum required padding.
2527  *
2528  * Another test checks for corrupted padding.
2529  *
2530  * Another test checks for a record whose payload is not a multiple of
2531  * the AES block size.
2532  */
2533 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_MTE_TESTS);
2534 
2535 #define GEN_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size,	\
2536 	    auth_alg, minor)						\
2537 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_iv);		\
2538 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_iv, tc)			\
2539 {									\
2540 	struct tls_enable en;						\
2541 	uint64_t seqno;							\
2542 									\
2543 	ATF_REQUIRE_KTLS();						\
2544 	seqno = random();						\
2545 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2546 	    seqno, &en);						\
2547 	test_ktls_receive_corrupted_iv(tc, &en, seqno, 64);		\
2548 	free_tls_enable(&en);						\
2549 }
2550 
2551 #define ADD_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size,	\
2552 	    auth_alg, minor)						\
2553 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_iv);
2554 
2555 #define	GEN_RECEIVE_EXPLICIT_IV_TESTS(cipher_name, cipher_alg,		\
2556 	    key_size, auth_alg, minor)					\
2557 	GEN_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size,	\
2558 	    auth_alg, minor)						\
2559 	GEN_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2560 	    auth_alg, minor, short_header,				\
2561 	    sizeof(struct tls_record_layer) + 1)
2562 
2563 #define	ADD_RECEIVE_EXPLICIT_IV_TESTS(cipher_name, cipher_alg,		\
2564 	    key_size, auth_alg, minor)					\
2565 	ADD_RECEIVE_BAD_IV_TEST(cipher_name, cipher_alg, key_size,	\
2566 	    auth_alg, minor)						\
2567 	ADD_RECEIVE_BAD_SIZE_TEST(cipher_name, cipher_alg, key_size,	\
2568 	    auth_alg, minor, short_header)
2569 
2570 /*
2571  * For cipher suites with an explicit IV, run a receive test where the
2572  * explicit IV has been corrupted.  Also run a receive test that sends
2573  * a short record without a complete IV.
2574  */
2575 AES_CBC_NONZERO_TESTS(GEN_RECEIVE_EXPLICIT_IV_TESTS);
2576 AES_GCM_12_TESTS(GEN_RECEIVE_EXPLICIT_IV_TESTS);
2577 
2578 #define GEN_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size,	\
2579 	    auth_alg, minor, len)					\
2580 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_bad_type);		\
2581 ATF_TC_BODY(ktls_receive_##cipher_name##_bad_type, tc)			\
2582 {									\
2583 	struct tls_enable en;						\
2584 	uint64_t seqno;							\
2585 									\
2586 	ATF_REQUIRE_KTLS();						\
2587 	seqno = random();						\
2588 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2589 	    seqno, &en);						\
2590 	test_ktls_receive_bad_type(tc, &en, seqno, len);		\
2591 	free_tls_enable(&en);						\
2592 }
2593 
2594 #define ADD_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size,	\
2595 	    auth_alg, minor)						\
2596 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_bad_type);
2597 
2598 #define GEN_RECEIVE_TLS13_TESTS(cipher_name, cipher_alg, key_size,	\
2599 	    auth_alg, minor)						\
2600 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2601 	    auth_alg, minor, short_padded, 64, 16)			\
2602 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2603 	    auth_alg, minor, long_padded, 64 * 1024, 15)		\
2604 	GEN_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size,	\
2605 	    auth_alg, minor, 64)
2606 
2607 #define ADD_RECEIVE_TLS13_TESTS(cipher_name, cipher_alg, key_size,	\
2608 	    auth_alg, minor)						\
2609 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2610 	    auth_alg, minor, short_padded)				\
2611 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
2612 	    auth_alg, minor, long_padded)				\
2613 	ADD_RECEIVE_BAD_TYPE_TEST(cipher_name, cipher_alg, key_size,	\
2614 	    auth_alg, minor)
2615 
2616 /*
2617  * For TLS 1.3 cipher suites, run two additional receive tests which
2618  * add padding to each record.  Also run a test that uses an
2619  * invalid "outer" record type.
2620  */
2621 TLS_13_TESTS(GEN_RECEIVE_TLS13_TESTS);
2622 
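/*
 * Enabling receive offload with an invalid cipher suite, MAC, or TLS
 * version combination should fail with EINVAL.
 */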
2623 static void
2624 test_ktls_invalid_receive_cipher_suite(const atf_tc_t *tc,
2625     struct tls_enable *en)
2626 {
2627 	int sockets[2];
2628 
2629 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
2630 
2631 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
2632 	    sizeof(*en)) == -1);
2633 	ATF_REQUIRE(errno == EINVAL);
2634 
2635 	close_sockets(sockets);
2636 }
2637 
2638 #define GEN_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg,	\
2639 	    minor)							\
2640 ATF_TC_WITHOUT_HEAD(ktls_receive_invalid_##name);			\
2641 ATF_TC_BODY(ktls_receive_invalid_##name, tc)				\
2642 {									\
2643 	struct tls_enable en;						\
2644 	uint64_t seqno;							\
2645 									\
2646 	ATF_REQUIRE_KTLS();						\
2647 	seqno = random();						\
2648 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2649 	    seqno, &en);						\
2650 	test_ktls_invalid_receive_cipher_suite(tc, &en);		\
2651 	free_tls_enable(&en);						\
2652 }
2653 
2654 #define ADD_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg,	\
2655 	    minor)							\
2656 	ATF_TP_ADD_TC(tp, ktls_receive_invalid_##name);
2657 
2658 /*
2659  * Ensure that invalid cipher suites are rejected for receive.
2660  */
2661 INVALID_CIPHER_SUITES(GEN_INVALID_RECEIVE_TEST);
2662 
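/*
 * Enabling receive offload with a cipher suite that is valid for
 * transmit but not supported for receive (the TLS 1.0 CBC suites)
 * should fail with EPROTONOSUPPORT.
 */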
2663 static void
2664 test_ktls_unsupported_receive_cipher_suite(const atf_tc_t *tc,
2665     struct tls_enable *en)
2666 {
2667 	int sockets[2];
2668 
2669 	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");
2670 
2671 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
2672 	    sizeof(*en)) == -1);
2673 	ATF_REQUIRE(errno == EPROTONOSUPPORT);
2674 
2675 	close_sockets(sockets);
2676 }
2677 
2678 #define GEN_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size,	\
2679 	    auth_alg, minor)						\
2680 ATF_TC_WITHOUT_HEAD(ktls_receive_unsupported_##name);			\
2681 ATF_TC_BODY(ktls_receive_unsupported_##name, tc)			\
2682 {									\
2683 	struct tls_enable en;						\
2684 	uint64_t seqno;							\
2685 									\
2686 	ATF_REQUIRE_KTLS();						\
2687 	seqno = random();						\
2688 	build_tls_enable(tc, cipher_alg, key_size, auth_alg, minor,	\
2689 	    seqno, &en);						\
2690 	test_ktls_unsupported_receive_cipher_suite(tc, &en);		\
2691 	free_tls_enable(&en);						\
2692 }
2693 
2694 #define ADD_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size,	\
2695 	    auth_alg, minor)						\
2696 	ATF_TP_ADD_TC(tp, ktls_receive_unsupported_##name);
2697 
2698 /*
2699  * Ensure that valid cipher suites not supported for receive are
2700  * rejected.
2701  */
2702 TLS_10_TESTS(GEN_UNSUPPORTED_RECEIVE_TEST);
2703 
2704 /*
2705  * Try to perform an invalid sendto(2) on a TXTLS-enabled socket, to exercise
2706  * KTLS error handling in the socket layer.
2707  */
2708 ATF_TC_WITHOUT_HEAD(ktls_sendto_baddst);
2709 ATF_TC_BODY(ktls_sendto_baddst, tc)
2710 {
2711 	char buf[32];
2712 	struct sockaddr_in dst;
2713 	struct tls_enable en;
2714 	ssize_t n;
2715 	int s;
2716 
2717 	ATF_REQUIRE_KTLS();
2718 
2719 	s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
2720 	ATF_REQUIRE(s >= 0);
2721 
2722 	build_tls_enable(tc, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,
2723 	    TLS_MINOR_VER_THREE, (uint64_t)random(), &en);
2724 
2725 	ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en,
2726 	    sizeof(en)) == 0);
2727 
2728 	memset(&dst, 0, sizeof(dst));
2729 	dst.sin_family = AF_INET;
2730 	dst.sin_len = sizeof(dst);
2731 	dst.sin_addr.s_addr = htonl(INADDR_BROADCAST);
2732 	dst.sin_port = htons(12345);
2733 
2734 	memset(buf, 0, sizeof(buf));
2735 	n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&dst,
2736 	    sizeof(dst));
2737 
2738 	/* Can't transmit to the broadcast address over TCP. */
2739 	ATF_REQUIRE_ERRNO(EACCES, n == -1);
2740 	ATF_REQUIRE(close(s) == 0);
2741 }
2742 
2743 ATF_TP_ADD_TCS(tp)
2744 {
2745 	/* Transmit tests */
2746 	AES_CBC_TESTS(ADD_TRANSMIT_TESTS);
2747 	AES_GCM_TESTS(ADD_TRANSMIT_TESTS);
2748 	CHACHA20_TESTS(ADD_TRANSMIT_TESTS);
2749 	AES_CBC_TESTS(ADD_TRANSMIT_PADDING_TESTS);
2750 	AES_CBC_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
2751 	AES_GCM_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
2752 	CHACHA20_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
2753 	INVALID_CIPHER_SUITES(ADD_INVALID_TRANSMIT_TEST);
2754 
2755 	/* Receive tests */
2756 	TLS_10_TESTS(ADD_UNSUPPORTED_RECEIVE_TEST);
2757 	AES_CBC_NONZERO_TESTS(ADD_RECEIVE_TESTS);
2758 	AES_GCM_TESTS(ADD_RECEIVE_TESTS);
2759 	CHACHA20_TESTS(ADD_RECEIVE_TESTS);
2760 	AES_CBC_NONZERO_TESTS(ADD_RECEIVE_MTE_TESTS);
2761 	AES_CBC_NONZERO_TESTS(ADD_RECEIVE_EXPLICIT_IV_TESTS);
2762 	AES_GCM_12_TESTS(ADD_RECEIVE_EXPLICIT_IV_TESTS);
2763 	TLS_13_TESTS(ADD_RECEIVE_TLS13_TESTS);
2764 	INVALID_CIPHER_SUITES(ADD_INVALID_RECEIVE_TEST);
2765 
2766 	/* Miscellaneous */
2767 	ATF_TP_ADD_TC(tp, ktls_sendto_baddst);
2768 
2769 	return (atf_no_error());
2770 }
2771