xref: /freebsd/tests/sys/kern/ktls_test.c (revision 045c8f526484cb3b97f5fd693987f4376fa43c5f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Netflix Inc.
5  * Written by: John Baldwin <jhb@FreeBSD.org>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
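/*
 * Tests for kernel TLS (KTLS) offload.  Each test creates a TCP
 * connection over loopback, enables KTLS on one end via setsockopt(),
 * and uses OpenSSL in userland either to decrypt and verify the TLS
 * records generated by the kernel (transmit tests) or to construct the
 * records fed to the kernel (receive tests).
 */
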
29 #include <sys/types.h>
30 #include <sys/endian.h>
31 #include <sys/event.h>
32 #include <sys/ktls.h>
33 #include <sys/socket.h>
34 #include <sys/sysctl.h>
35 #include <netinet/in.h>
36 #include <netinet/tcp.h>
37 #include <crypto/cryptodev.h>
38 #include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
44 #include <atf-c.h>
45 
46 #include <openssl/err.h>
47 #include <openssl/evp.h>
48 #include <openssl/hmac.h>
49 
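/*
 * Skip the test if the kernel does not support TLS offload or if it is
 * administratively disabled via the kern.ipc.tls.enable sysctl.
 */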
50 static void
51 require_ktls(void)
52 {
53 	size_t len;
54 	bool enable;
55 
56 	len = sizeof(enable);
57 	if (sysctlbyname("kern.ipc.tls.enable", &enable, &len, NULL, 0) == -1) {
58 		if (errno == ENOENT)
59 			atf_tc_skip("kernel does not support TLS offload");
60 		atf_libc_error(errno, "Failed to read kern.ipc.tls.enable");
61 	}
62 
63 	if (!enable)
64 		atf_tc_skip("Kernel TLS is disabled");
65 }
66 
67 #define	ATF_REQUIRE_KTLS()	require_ktls()
68 
69 static char
70 rdigit(void)
71 {
72 	/* ASCII printable values between 0x20 and 0x7e */
73 	return (0x20 + random() % (0x7f - 0x20));
74 }
75 
76 static char *
77 alloc_buffer(size_t len)
78 {
79 	char *buf;
80 	size_t i;
81 
82 	if (len == 0)
83 		return (NULL);
84 	buf = malloc(len);
85 	for (i = 0; i < len; i++)
86 		buf[i] = rdigit();
87 	return (buf);
88 }
89 
90 static bool
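/*
 * Create a pair of connected TCP sockets over loopback.  sv[0] is the
 * connecting side and sv[1] the accepted side; both are left in
 * non-blocking mode.
 */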
91 socketpair_tcp(int *sv)
92 {
93 	struct pollfd pfd;
94 	struct sockaddr_in sin;
95 	socklen_t len;
96 	int as, cs, ls;
97 
98 	ls = socket(PF_INET, SOCK_STREAM, 0);
99 	if (ls == -1) {
100 		warn("socket() for listen");
101 		return (false);
102 	}
103 
104 	memset(&sin, 0, sizeof(sin));
105 	sin.sin_len = sizeof(sin);
106 	sin.sin_family = AF_INET;
107 	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
108 	if (bind(ls, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
109 		warn("bind");
110 		close(ls);
111 		return (false);
112 	}
113 
114 	if (listen(ls, 1) == -1) {
115 		warn("listen");
116 		close(ls);
117 		return (false);
118 	}
119 
120 	len = sizeof(sin);
121 	if (getsockname(ls, (struct sockaddr *)&sin, &len) == -1) {
122 		warn("getsockname");
123 		close(ls);
124 		return (false);
125 	}
126 
127 	cs = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
128 	if (cs == -1) {
129 		warn("socket() for connect");
130 		close(ls);
131 		return (false);
132 	}
133 
134 	if (connect(cs, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
135 		if (errno != EINPROGRESS) {
136 			warn("connect");
137 			close(ls);
138 			close(cs);
139 			return (false);
140 		}
141 	}
142 
143 	as = accept4(ls, NULL, NULL, SOCK_NONBLOCK);
144 	if (as == -1) {
145 		warn("accept4");
146 		close(ls);
147 		close(cs);
148 		return (false);
149 	}
150 
151 	close(ls);
152 
153 	pfd.fd = cs;
154 	pfd.events = POLLOUT;
155 	pfd.revents = 0;
156 	ATF_REQUIRE(poll(&pfd, 1, INFTIM) == 1);
157 	ATF_REQUIRE(pfd.revents == POLLOUT);
158 
159 	sv[0] = cs;
160 	sv[1] = as;
161 	return (true);
162 }
163 
164 static void
165 fd_set_blocking(int fd)
166 {
167 	int flags;
168 
169 	ATF_REQUIRE((flags = fcntl(fd, F_GETFL)) != -1);
170 	flags &= ~O_NONBLOCK;
171 	ATF_REQUIRE(fcntl(fd, F_SETFL, flags) != -1);
172 }
173 
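/*
 * Decrypt 'size' bytes of AES-CBC ciphertext from 'input' into 'output'
 * with padding disabled; the caller verifies and strips the TLS MAC and
 * padding afterwards.
 */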
174 static bool
175 cbc_decrypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
176     const char *input, char *output, size_t size)
177 {
178 	EVP_CIPHER_CTX *ctx;
179 	int outl, total;
180 
181 	ctx = EVP_CIPHER_CTX_new();
182 	if (ctx == NULL) {
183 		warnx("EVP_CIPHER_CTX_new failed: %s",
184 		    ERR_error_string(ERR_get_error(), NULL));
185 		return (false);
186 	}
187 	if (EVP_CipherInit_ex(ctx, cipher, NULL, (const u_char *)key,
188 	    (const u_char *)iv, 0) != 1) {
189 		warnx("EVP_CipherInit_ex failed: %s",
190 		    ERR_error_string(ERR_get_error(), NULL));
191 		EVP_CIPHER_CTX_free(ctx);
192 		return (false);
193 	}
194 	EVP_CIPHER_CTX_set_padding(ctx, 0);
195 	if (EVP_CipherUpdate(ctx, (u_char *)output, &outl,
196 	    (const u_char *)input, size) != 1) {
197 		warnx("EVP_CipherUpdate failed: %s",
198 		    ERR_error_string(ERR_get_error(), NULL));
199 		EVP_CIPHER_CTX_free(ctx);
200 		return (false);
201 	}
202 	total = outl;
203 	if (EVP_CipherFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
204 		warnx("EVP_CipherFinal_ex failed: %s",
205 		    ERR_error_string(ERR_get_error(), NULL));
206 		EVP_CIPHER_CTX_free(ctx);
207 		return (false);
208 	}
209 	total += outl;
210 	if ((size_t)total != size) {
211 		warnx("decrypt size mismatch: %zu vs %d", size, total);
212 		EVP_CIPHER_CTX_free(ctx);
213 		return (false);
214 	}
215 	EVP_CIPHER_CTX_free(ctx);
216 	return (true);
217 }
218 
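/*
 * Recompute the HMAC over the additional data ('aad') and payload and
 * compare it against the digest carried in the record.
 */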
219 static bool
220 verify_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad,
221     size_t aad_len, const void *buffer, size_t len, const void *digest)
222 {
223 	HMAC_CTX *ctx;
224 	unsigned char digest2[EVP_MAX_MD_SIZE];
225 	u_int digest_len;
226 
227 	ctx = HMAC_CTX_new();
228 	if (ctx == NULL) {
229 		warnx("HMAC_CTX_new failed: %s",
230 		    ERR_error_string(ERR_get_error(), NULL));
231 		return (false);
232 	}
233 	if (HMAC_Init_ex(ctx, key, key_len, md, NULL) != 1) {
234 		warnx("HMAC_Init_ex failed: %s",
235 		    ERR_error_string(ERR_get_error(), NULL));
236 		HMAC_CTX_free(ctx);
237 		return (false);
238 	}
239 	if (HMAC_Update(ctx, aad, aad_len) != 1) {
240 		warnx("HMAC_Update (aad) failed: %s",
241 		    ERR_error_string(ERR_get_error(), NULL));
242 		HMAC_CTX_free(ctx);
243 		return (false);
244 	}
245 	if (HMAC_Update(ctx, buffer, len) != 1) {
246 		warnx("HMAC_Update (payload) failed: %s",
247 		    ERR_error_string(ERR_get_error(), NULL));
248 		HMAC_CTX_free(ctx);
249 		return (false);
250 	}
251 	if (HMAC_Final(ctx, digest2, &digest_len) != 1) {
252 		warnx("HMAC_Final failed: %s",
253 		    ERR_error_string(ERR_get_error(), NULL));
254 		HMAC_CTX_free(ctx);
255 		return (false);
256 	}
257 	HMAC_CTX_free(ctx);
258 	if (memcmp(digest, digest2, digest_len) != 0) {
259 		warnx("HMAC mismatch");
260 		return (false);
261 	}
262 	return (true);
263 }
264 
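/*
 * AEAD (AES-GCM and ChaCha20-Poly1305) helpers built on OpenSSL's EVP
 * interface.  The authentication tag is passed in a separate 'tag'
 * buffer of 'tag_len' bytes rather than being appended to the output.
 */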
265 static bool
266 aead_encrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
267     const void *aad, size_t aad_len, const char *input, char *output,
268     size_t size, char *tag, size_t tag_len)
269 {
270 	EVP_CIPHER_CTX *ctx;
271 	int outl, total;
272 
273 	ctx = EVP_CIPHER_CTX_new();
274 	if (ctx == NULL) {
275 		warnx("EVP_CIPHER_CTX_new failed: %s",
276 		    ERR_error_string(ERR_get_error(), NULL));
277 		return (false);
278 	}
279 	if (EVP_EncryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
280 	    (const u_char *)nonce) != 1) {
281 		warnx("EVP_EncryptInit_ex failed: %s",
282 		    ERR_error_string(ERR_get_error(), NULL));
283 		EVP_CIPHER_CTX_free(ctx);
284 		return (false);
285 	}
286 	EVP_CIPHER_CTX_set_padding(ctx, 0);
287 	if (aad != NULL) {
288 		if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
289 		    aad_len) != 1) {
290 			warnx("EVP_EncryptUpdate for AAD failed: %s",
291 			    ERR_error_string(ERR_get_error(), NULL));
292 			EVP_CIPHER_CTX_free(ctx);
293 			return (false);
294 		}
295 	}
296 	if (EVP_EncryptUpdate(ctx, (u_char *)output, &outl,
297 	    (const u_char *)input, size) != 1) {
298 		warnx("EVP_EncryptUpdate failed: %s",
299 		    ERR_error_string(ERR_get_error(), NULL));
300 		EVP_CIPHER_CTX_free(ctx);
301 		return (false);
302 	}
303 	total = outl;
304 	if (EVP_EncryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
305 		warnx("EVP_EncryptFinal_ex failed: %s",
306 		    ERR_error_string(ERR_get_error(), NULL));
307 		EVP_CIPHER_CTX_free(ctx);
308 		return (false);
309 	}
310 	total += outl;
311 	if ((size_t)total != size) {
312 		warnx("encrypt size mismatch: %zu vs %d", size, total);
313 		EVP_CIPHER_CTX_free(ctx);
314 		return (false);
315 	}
316 	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tag_len, tag) !=
317 	    1) {
318 		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed: %s",
319 		    ERR_error_string(ERR_get_error(), NULL));
320 		EVP_CIPHER_CTX_free(ctx);
321 		return (false);
322 	}
323 	EVP_CIPHER_CTX_free(ctx);
324 	return (true);
325 }
326 
327 static bool
328 aead_decrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
329     const void *aad, size_t aad_len, const char *input, char *output,
330     size_t size, const char *tag, size_t tag_len)
331 {
332 	EVP_CIPHER_CTX *ctx;
333 	int outl, total;
334 	bool valid;
335 
336 	ctx = EVP_CIPHER_CTX_new();
337 	if (ctx == NULL) {
338 		warnx("EVP_CIPHER_CTX_new failed: %s",
339 		    ERR_error_string(ERR_get_error(), NULL));
340 		return (false);
341 	}
342 	if (EVP_DecryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
343 	    (const u_char *)nonce) != 1) {
344 		warnx("EVP_DecryptInit_ex failed: %s",
345 		    ERR_error_string(ERR_get_error(), NULL));
346 		EVP_CIPHER_CTX_free(ctx);
347 		return (false);
348 	}
349 	EVP_CIPHER_CTX_set_padding(ctx, 0);
350 	if (aad != NULL) {
351 		if (EVP_DecryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
352 		    aad_len) != 1) {
353 			warnx("EVP_DecryptUpdate for AAD failed: %s",
354 			    ERR_error_string(ERR_get_error(), NULL));
355 			EVP_CIPHER_CTX_free(ctx);
356 			return (false);
357 		}
358 	}
359 	if (EVP_DecryptUpdate(ctx, (u_char *)output, &outl,
360 	    (const u_char *)input, size) != 1) {
361 		warnx("EVP_DecryptUpdate failed: %s",
362 		    ERR_error_string(ERR_get_error(), NULL));
363 		EVP_CIPHER_CTX_free(ctx);
364 		return (false);
365 	}
366 	total = outl;
367 	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tag_len,
368 	    __DECONST(char *, tag)) != 1) {
369 		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed: %s",
370 		    ERR_error_string(ERR_get_error(), NULL));
371 		EVP_CIPHER_CTX_free(ctx);
372 		return (false);
373 	}
374 	valid = (EVP_DecryptFinal_ex(ctx, (u_char *)output + outl, &outl) == 1);
375 	total += outl;
376 	if ((size_t)total != size) {
377 		warnx("decrypt size mismatch: %zu vs %d", size, total);
378 		EVP_CIPHER_CTX_free(ctx);
379 		return (false);
380 	}
381 	if (!valid)
382 		warnx("tag mismatch");
383 	EVP_CIPHER_CTX_free(ctx);
384 	return (valid);
385 }
386 
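/*
 * Fill in a struct tls_enable with randomly generated key material and
 * an IV sized for the requested cipher suite and TLS minor version.
 * The buffers are allocated with malloc() and must be released via
 * free_tls_enable().
 */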
387 static void
388 build_tls_enable(int cipher_alg, size_t cipher_key_len, int auth_alg,
389     int minor, uint64_t seqno, struct tls_enable *en)
390 {
391 	u_int auth_key_len, iv_len;
392 
393 	memset(en, 0, sizeof(*en));
394 
395 	switch (cipher_alg) {
396 	case CRYPTO_AES_CBC:
397 		if (minor == TLS_MINOR_VER_ZERO)
398 			iv_len = AES_BLOCK_LEN;
399 		else
400 			iv_len = 0;
401 		break;
402 	case CRYPTO_AES_NIST_GCM_16:
403 		if (minor == TLS_MINOR_VER_TWO)
404 			iv_len = TLS_AEAD_GCM_LEN;
405 		else
406 			iv_len = TLS_1_3_GCM_IV_LEN;
407 		break;
408 	case CRYPTO_CHACHA20_POLY1305:
409 		iv_len = TLS_CHACHA20_IV_LEN;
410 		break;
411 	default:
412 		iv_len = 0;
413 		break;
414 	}
415 	switch (auth_alg) {
416 	case CRYPTO_SHA1_HMAC:
417 		auth_key_len = SHA1_HASH_LEN;
418 		break;
419 	case CRYPTO_SHA2_256_HMAC:
420 		auth_key_len = SHA2_256_HASH_LEN;
421 		break;
422 	case CRYPTO_SHA2_384_HMAC:
423 		auth_key_len = SHA2_384_HASH_LEN;
424 		break;
425 	default:
426 		auth_key_len = 0;
427 		break;
428 	}
429 	en->cipher_key = alloc_buffer(cipher_key_len);
430 	en->iv = alloc_buffer(iv_len);
431 	en->auth_key = alloc_buffer(auth_key_len);
432 	en->cipher_algorithm = cipher_alg;
433 	en->cipher_key_len = cipher_key_len;
434 	en->iv_len = iv_len;
435 	en->auth_algorithm = auth_alg;
436 	en->auth_key_len = auth_key_len;
437 	en->tls_vmajor = TLS_MAJOR_VER_ONE;
438 	en->tls_vminor = minor;
439 	be64enc(en->rec_seq, seqno);
440 }
441 
442 static void
443 free_tls_enable(struct tls_enable *en)
444 {
445 	free(__DECONST(void *, en->cipher_key));
446 	free(__DECONST(void *, en->iv));
447 	free(__DECONST(void *, en->auth_key));
448 }
449 
450 static const EVP_CIPHER *
451 tls_EVP_CIPHER(const struct tls_enable *en)
452 {
453 	switch (en->cipher_algorithm) {
454 	case CRYPTO_AES_CBC:
455 		switch (en->cipher_key_len) {
456 		case 128 / 8:
457 			return (EVP_aes_128_cbc());
458 		case 256 / 8:
459 			return (EVP_aes_256_cbc());
460 		default:
461 			return (NULL);
462 		}
463 		break;
464 	case CRYPTO_AES_NIST_GCM_16:
465 		switch (en->cipher_key_len) {
466 		case 128 / 8:
467 			return (EVP_aes_128_gcm());
468 		case 256 / 8:
469 			return (EVP_aes_256_gcm());
470 		default:
471 			return (NULL);
472 		}
473 		break;
474 	case CRYPTO_CHACHA20_POLY1305:
475 		return (EVP_chacha20_poly1305());
476 	default:
477 		return (NULL);
478 	}
479 }
480 
481 static const EVP_MD *
482 tls_EVP_MD(const struct tls_enable *en)
483 {
484 	switch (en->auth_algorithm) {
485 	case CRYPTO_SHA1_HMAC:
486 		return (EVP_sha1());
487 	case CRYPTO_SHA2_256_HMAC:
488 		return (EVP_sha256());
489 	case CRYPTO_SHA2_384_HMAC:
490 		return (EVP_sha384());
491 	default:
492 		return (NULL);
493 	}
494 }
495 
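/*
 * Length of the TLS record header plus any explicit IV (AES-CBC with
 * TLS 1.1+) or explicit nonce (AES-GCM with TLS 1.2) that precedes the
 * ciphertext.
 */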
496 static size_t
497 tls_header_len(struct tls_enable *en)
498 {
499 	size_t len;
500 
501 	len = sizeof(struct tls_record_layer);
502 	switch (en->cipher_algorithm) {
503 	case CRYPTO_AES_CBC:
504 		if (en->tls_vminor != TLS_MINOR_VER_ZERO)
505 			len += AES_BLOCK_LEN;
506 		return (len);
507 	case CRYPTO_AES_NIST_GCM_16:
508 		if (en->tls_vminor == TLS_MINOR_VER_TWO)
509 			len += sizeof(uint64_t);
510 		return (len);
511 	case CRYPTO_CHACHA20_POLY1305:
512 		return (len);
513 	default:
514 		return (0);
515 	}
516 }
517 
518 static size_t
519 tls_mac_len(struct tls_enable *en)
520 {
521 	switch (en->cipher_algorithm) {
522 	case CRYPTO_AES_CBC:
523 		switch (en->auth_algorithm) {
524 		case CRYPTO_SHA1_HMAC:
525 			return (SHA1_HASH_LEN);
526 		case CRYPTO_SHA2_256_HMAC:
527 			return (SHA2_256_HASH_LEN);
528 		case CRYPTO_SHA2_384_HMAC:
529 			return (SHA2_384_HASH_LEN);
530 		default:
531 			return (0);
532 		}
533 	case CRYPTO_AES_NIST_GCM_16:
534 		return (AES_GMAC_HASH_LEN);
535 	case CRYPTO_CHACHA20_POLY1305:
536 		return (POLY1305_HASH_LEN);
537 	default:
538 		return (0);
539 	}
540 }
541 
542 /* Includes maximum padding for MTE. */
543 static size_t
544 tls_trailer_len(struct tls_enable *en)
545 {
546 	size_t len;
547 
548 	len = tls_mac_len(en);
549 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
550 		len += AES_BLOCK_LEN;
551 	if (en->tls_vminor == TLS_MINOR_VER_THREE)
552 		len++;
553 	return (len);
554 }
555 
556 /* 'len' is the length of the payload application data. */
557 static void
558 tls_mte_aad(struct tls_enable *en, size_t len,
559     const struct tls_record_layer *hdr, uint64_t seqno, struct tls_mac_data *ad)
560 {
561 	ad->seq = htobe64(seqno);
562 	ad->type = hdr->tls_type;
563 	ad->tls_vmajor = hdr->tls_vmajor;
564 	ad->tls_vminor = hdr->tls_vminor;
565 	ad->tls_length = htons(len);
566 }
567 
568 static void
569 tls_12_aead_aad(struct tls_enable *en, size_t len,
570     const struct tls_record_layer *hdr, uint64_t seqno,
571     struct tls_aead_data *ad)
572 {
573 	ad->seq = htobe64(seqno);
574 	ad->type = hdr->tls_type;
575 	ad->tls_vmajor = hdr->tls_vmajor;
576 	ad->tls_vminor = hdr->tls_vminor;
577 	ad->tls_length = htons(len);
578 }
579 
580 static void
581 tls_13_aad(struct tls_enable *en, const struct tls_record_layer *hdr,
582     uint64_t seqno, struct tls_aead_data_13 *ad)
583 {
584 	ad->type = hdr->tls_type;
585 	ad->tls_vmajor = hdr->tls_vmajor;
586 	ad->tls_vminor = hdr->tls_vminor;
587 	ad->tls_length = hdr->tls_length;
588 }
589 
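/*
 * The TLS 1.2 AES-GCM nonce is the 4-byte implicit salt from the key
 * material followed by the 8-byte explicit nonce carried in the record
 * just after the header.
 */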
590 static void
591 tls_12_gcm_nonce(struct tls_enable *en, const struct tls_record_layer *hdr,
592     char *nonce)
593 {
594 	memcpy(nonce, en->iv, TLS_AEAD_GCM_LEN);
595 	memcpy(nonce + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
596 }
597 
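/*
 * TLS 1.3 (and ChaCha20-Poly1305) records use a per-record nonce formed
 * by XORing the 64-bit sequence number into the last eight bytes of the
 * 12-byte IV (RFC 8446, section 5.3).  For example, IV
 * 000102030405060708090a0b with sequence number 1 yields the nonce
 * 000102030405060708090a0a.
 */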
598 static void
599 tls_13_nonce(struct tls_enable *en, uint64_t seqno, char *nonce)
600 {
601 	static_assert(TLS_1_3_GCM_IV_LEN == TLS_CHACHA20_IV_LEN,
602 	    "TLS 1.3 nonce length mismatch");
603 	memcpy(nonce, en->iv, TLS_1_3_GCM_IV_LEN);
604 	*(uint64_t *)(nonce + 4) ^= htobe64(seqno);
605 }
606 
607 /*
608  * Decrypt a TLS record 'len' bytes long at 'src' and store the result at
609  * 'dst'.  If the TLS record header length doesn't match or 'dst' doesn't
610  * have sufficient room ('avail'), fail the test.
611  */
612 static size_t
613 decrypt_tls_aes_cbc_mte(struct tls_enable *en, uint64_t seqno, const void *src,
614     size_t len, void *dst, size_t avail, uint8_t *record_type)
615 {
616 	const struct tls_record_layer *hdr;
617 	struct tls_mac_data aad;
618 	const char *iv;
619 	char *buf;
620 	size_t hdr_len, mac_len, payload_len;
621 	int padding;
622 
623 	hdr = src;
624 	hdr_len = tls_header_len(en);
625 	mac_len = tls_mac_len(en);
626 	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
627 	ATF_REQUIRE(hdr->tls_vminor == en->tls_vminor);
628 
629 	/* First, decrypt the outer payload into a temporary buffer. */
630 	payload_len = len - hdr_len;
631 	buf = malloc(payload_len);
632 	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
633 		iv = en->iv;
634 	else
635 		iv = (void *)(hdr + 1);
636 	ATF_REQUIRE(cbc_decrypt(tls_EVP_CIPHER(en), en->cipher_key, iv,
637 	    (const u_char *)src + hdr_len, buf, payload_len));
638 
639 	/*
640 	 * Copy the last encrypted block to use as the IV for the next
641 	 * record for TLS 1.0.
642 	 */
643 	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
644 		memcpy(__DECONST(uint8_t *, en->iv), (const u_char *)src +
645 		    (len - AES_BLOCK_LEN), AES_BLOCK_LEN);
646 
647 	/*
648 	 * Verify trailing padding and strip.
649 	 *
650 	 * The kernel always generates the smallest amount of padding.
651 	 */
652 	padding = buf[payload_len - 1] + 1;
653 	ATF_REQUIRE(padding > 0 && padding <= AES_BLOCK_LEN);
654 	ATF_REQUIRE(payload_len >= mac_len + padding);
655 	payload_len -= padding;
656 
657 	/* Verify HMAC. */
658 	payload_len -= mac_len;
659 	tls_mte_aad(en, payload_len, hdr, seqno, &aad);
660 	ATF_REQUIRE(verify_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len,
661 	    &aad, sizeof(aad), buf, payload_len, buf + payload_len));
662 
	ATF_REQUIRE(payload_len <= avail);
	memcpy(dst, buf, payload_len);
	free(buf);
	*record_type = hdr->tls_type;
	return (payload_len);
667 }
668 
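/*
 * Decrypt and authenticate a single TLS 1.2 AEAD record, storing the
 * plaintext at 'dst' and returning its length.
 */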
669 static size_t
670 decrypt_tls_12_aead(struct tls_enable *en, uint64_t seqno, const void *src,
671     size_t len, void *dst, uint8_t *record_type)
672 {
673 	const struct tls_record_layer *hdr;
674 	struct tls_aead_data aad;
675 	char nonce[12];
676 	size_t hdr_len, mac_len, payload_len;
677 
678 	hdr = src;
679 
680 	hdr_len = tls_header_len(en);
681 	mac_len = tls_mac_len(en);
682 	payload_len = len - (hdr_len + mac_len);
683 	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
684 	ATF_REQUIRE(hdr->tls_vminor == TLS_MINOR_VER_TWO);
685 
686 	tls_12_aead_aad(en, payload_len, hdr, seqno, &aad);
687 	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
688 		tls_12_gcm_nonce(en, hdr, nonce);
689 	else
690 		tls_13_nonce(en, seqno, nonce);
691 
692 	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
693 	    &aad, sizeof(aad), (const char *)src + hdr_len, dst, payload_len,
694 	    (const char *)src + hdr_len + payload_len, mac_len));
695 
696 	*record_type = hdr->tls_type;
697 	return (payload_len);
698 }
699 
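/*
 * Decrypt and authenticate a single TLS 1.3 record.  The inner
 * plaintext carries the real record type as its final byte, which is
 * returned via 'record_type' and stripped from the payload.
 */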
700 static size_t
701 decrypt_tls_13_aead(struct tls_enable *en, uint64_t seqno, const void *src,
702     size_t len, void *dst, uint8_t *record_type)
703 {
704 	const struct tls_record_layer *hdr;
705 	struct tls_aead_data_13 aad;
706 	char nonce[12];
707 	char *buf;
708 	size_t hdr_len, mac_len, payload_len;
709 
710 	hdr = src;
711 
712 	hdr_len = tls_header_len(en);
713 	mac_len = tls_mac_len(en);
714 	payload_len = len - (hdr_len + mac_len);
715 	ATF_REQUIRE(payload_len >= 1);
716 	ATF_REQUIRE(hdr->tls_type == TLS_RLTYPE_APP);
717 	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
718 	ATF_REQUIRE(hdr->tls_vminor == TLS_MINOR_VER_TWO);
719 
720 	tls_13_aad(en, hdr, seqno, &aad);
721 	tls_13_nonce(en, seqno, nonce);
722 
723 	/*
724 	 * Have to use a temporary buffer for the output since the record
725 	 * type is carried as the last byte of the decrypted payload.
726 	 */
727 	buf = malloc(payload_len);
728 
729 	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
730 	    &aad, sizeof(aad), (const char *)src + hdr_len, buf, payload_len,
731 	    (const char *)src + hdr_len + payload_len, mac_len));
732 
733 	/* Trim record type. */
734 	*record_type = buf[payload_len - 1];
735 	payload_len--;
736 
737 	memcpy(dst, buf, payload_len);
738 	free(buf);
739 
740 	return (payload_len);
741 }
742 
743 static size_t
744 decrypt_tls_aead(struct tls_enable *en, uint64_t seqno, const void *src,
745     size_t len, void *dst, size_t avail, uint8_t *record_type)
746 {
747 	const struct tls_record_layer *hdr;
748 	size_t payload_len;
749 
750 	hdr = src;
751 	ATF_REQUIRE(ntohs(hdr->tls_length) + sizeof(*hdr) == len);
752 
753 	payload_len = len - (tls_header_len(en) + tls_trailer_len(en));
754 	ATF_REQUIRE(payload_len <= avail);
755 
756 	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
757 		ATF_REQUIRE(decrypt_tls_12_aead(en, seqno, src, len, dst,
758 		    record_type) == payload_len);
759 	} else {
760 		ATF_REQUIRE(decrypt_tls_13_aead(en, seqno, src, len, dst,
761 		    record_type) == payload_len);
762 	}
763 
764 	return (payload_len);
765 }
766 
767 static size_t
768 decrypt_tls_record(struct tls_enable *en, uint64_t seqno, const void *src,
769     size_t len, void *dst, size_t avail, uint8_t *record_type)
770 {
771 	if (en->cipher_algorithm == CRYPTO_AES_CBC)
772 		return (decrypt_tls_aes_cbc_mte(en, seqno, src, len, dst, avail,
773 		    record_type));
774 	else
775 		return (decrypt_tls_aead(en, seqno, src, len, dst, avail,
776 		    record_type));
777 }
778 
779 /*
780  * Encrypt a TLS record of type 'record_type' with payload 'len' bytes
781  * long at 'src' and store the result at 'dst'.  If 'dst' doesn't have
782  * sufficient room ('avail'), fail the test.
783  */
784 static size_t
785 encrypt_tls_12_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
786     const void *src, size_t len, void *dst)
787 {
788 	struct tls_record_layer *hdr;
789 	struct tls_aead_data aad;
790 	char nonce[12];
791 	size_t hdr_len, mac_len, record_len;
792 
793 	hdr = dst;
794 
795 	hdr_len = tls_header_len(en);
796 	mac_len = tls_mac_len(en);
797 	record_len = hdr_len + len + mac_len;
798 
799 	hdr->tls_type = record_type;
800 	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
801 	hdr->tls_vminor = TLS_MINOR_VER_TWO;
802 	hdr->tls_length = htons(record_len - sizeof(*hdr));
803 	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
804 		memcpy(hdr + 1, &seqno, sizeof(seqno));
805 
806 	tls_12_aead_aad(en, len, hdr, seqno, &aad);
807 	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
808 		tls_12_gcm_nonce(en, hdr, nonce);
809 	else
810 		tls_13_nonce(en, seqno, nonce);
811 
812 	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
813 	    &aad, sizeof(aad), src, (char *)dst + hdr_len, len,
814 	    (char *)dst + hdr_len + len, mac_len));
815 
816 	return (record_len);
817 }
818 
819 static size_t
820 encrypt_tls_13_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
821     const void *src, size_t len, void *dst, size_t padding)
822 {
823 	struct tls_record_layer *hdr;
824 	struct tls_aead_data_13 aad;
825 	char nonce[12];
826 	char *buf;
827 	size_t hdr_len, mac_len, record_len;
828 
829 	hdr = dst;
830 
831 	hdr_len = tls_header_len(en);
832 	mac_len = tls_mac_len(en);
833 	record_len = hdr_len + len + 1 + padding + mac_len;
834 
835 	hdr->tls_type = TLS_RLTYPE_APP;
836 	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
837 	hdr->tls_vminor = TLS_MINOR_VER_TWO;
838 	hdr->tls_length = htons(record_len - sizeof(*hdr));
839 
840 	tls_13_aad(en, hdr, seqno, &aad);
841 	tls_13_nonce(en, seqno, nonce);
842 
843 	/*
844 	 * Have to use a temporary buffer for the input so that the record
845 	 * type can be appended.
846 	 */
847 	buf = malloc(len + 1 + padding);
848 	memcpy(buf, src, len);
849 	buf[len] = record_type;
850 	memset(buf + len + 1, 0, padding);
851 
852 	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
853 	    &aad, sizeof(aad), buf, (char *)dst + hdr_len, len + 1 + padding,
854 	    (char *)dst + hdr_len + len + 1 + padding, mac_len));
855 
856 	free(buf);
857 
858 	return (record_len);
859 }
860 
861 static size_t
862 encrypt_tls_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
863     const void *src, size_t len, void *dst, size_t avail, size_t padding)
864 {
865 	size_t record_len;
866 
867 	record_len = tls_header_len(en) + len + padding + tls_trailer_len(en);
868 	ATF_REQUIRE(record_len <= avail);
869 
870 	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
871 		ATF_REQUIRE(padding == 0);
872 		ATF_REQUIRE(encrypt_tls_12_aead(en, record_type, seqno, src,
873 		    len, dst) == record_len);
874 	} else
875 		ATF_REQUIRE(encrypt_tls_13_aead(en, record_type, seqno, src,
876 		    len, dst, padding) == record_len);
877 
878 	return (record_len);
879 }
880 
881 static size_t
882 encrypt_tls_record(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
883     const void *src, size_t len, void *dst, size_t avail, size_t padding)
884 {
885 	return (encrypt_tls_aead(en, record_type, seqno, src, len, dst, avail,
886 	    padding));
887 }
888 
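/*
 * Write 'len' bytes of application data to a socket with KTLS transmit
 * enabled and verify the TLS records read back from the peer by
 * decrypting them in userland.  A kqueue drives the non-blocking reads
 * and writes.
 */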
889 static void
890 test_ktls_transmit_app_data(struct tls_enable *en, uint64_t seqno, size_t len)
891 {
892 	struct kevent ev;
893 	struct tls_record_layer *hdr;
894 	char *plaintext, *decrypted, *outbuf;
895 	size_t decrypted_len, outbuf_len, outbuf_cap, record_len, written;
896 	ssize_t rv;
897 	int kq, sockets[2];
898 	uint8_t record_type;
899 
900 	plaintext = alloc_buffer(len);
901 	decrypted = malloc(len);
902 	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
903 	    tls_trailer_len(en);
904 	outbuf = malloc(outbuf_cap);
905 	hdr = (struct tls_record_layer *)outbuf;
906 
907 	ATF_REQUIRE((kq = kqueue()) != -1);
908 
909 	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");
910 
911 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
912 	    sizeof(*en)) == 0);
913 
914 	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
915 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
916 	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
917 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
918 
919 	decrypted_len = 0;
920 	outbuf_len = 0;
921 	written = 0;
922 
923 	while (decrypted_len != len) {
924 		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);
925 
926 		switch (ev.filter) {
927 		case EVFILT_WRITE:
928 			/* Try to write any remaining data. */
929 			rv = write(ev.ident, plaintext + written,
930 			    len - written);
931 			ATF_REQUIRE_MSG(rv > 0,
932 			    "failed to write to socket");
933 			written += rv;
934 			if (written == len) {
935 				ev.flags = EV_DISABLE;
936 				ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
937 				    NULL) == 0);
938 			}
939 			break;
940 
941 		case EVFILT_READ:
942 			ATF_REQUIRE((ev.flags & EV_EOF) == 0);
943 
944 			/*
945 			 * Try to read data for the next TLS record
946 			 * into outbuf.  Start by reading the header
947 			 * to determine how much additional data to
948 			 * read.
949 			 */
950 			if (outbuf_len < sizeof(struct tls_record_layer)) {
951 				rv = read(ev.ident, outbuf + outbuf_len,
952 				    sizeof(struct tls_record_layer) -
953 				    outbuf_len);
954 				ATF_REQUIRE_MSG(rv > 0,
955 				    "failed to read from socket");
956 				outbuf_len += rv;
957 			}
958 
959 			if (outbuf_len < sizeof(struct tls_record_layer))
960 				break;
961 
962 			record_len = sizeof(struct tls_record_layer) +
963 			    ntohs(hdr->tls_length);
964 			ATF_REQUIRE(record_len <= outbuf_cap);
965 			ATF_REQUIRE(record_len > outbuf_len);
966 			rv = read(ev.ident, outbuf + outbuf_len,
967 			    record_len - outbuf_len);
968 			if (rv == -1 && errno == EAGAIN)
969 				break;
970 			ATF_REQUIRE_MSG(rv > 0, "failed to read from socket");
971 
972 			outbuf_len += rv;
973 			if (outbuf_len == record_len) {
974 				decrypted_len += decrypt_tls_record(en, seqno,
975 				    outbuf, outbuf_len,
976 				    decrypted + decrypted_len,
977 				    len - decrypted_len, &record_type);
978 				ATF_REQUIRE(record_type == TLS_RLTYPE_APP);
979 
980 				seqno++;
981 				outbuf_len = 0;
982 			}
983 			break;
984 		}
985 	}
986 
987 	ATF_REQUIRE_MSG(written == decrypted_len,
988 	    "read %zu decrypted bytes, but wrote %zu", decrypted_len, written);
989 
990 	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);
991 
992 	free(outbuf);
993 	free(decrypted);
994 	free(plaintext);
995 
996 	ATF_REQUIRE(close(sockets[1]) == 0);
997 	ATF_REQUIRE(close(sockets[0]) == 0);
998 	ATF_REQUIRE(close(kq) == 0);
999 }
1000 
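/*
 * Transmit a TLS record with an explicit record type by attaching a
 * TLS_SET_RECORD_TYPE control message to sendmsg().
 */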
1001 static void
1002 ktls_send_control_message(int fd, uint8_t type, void *data, size_t len)
1003 {
1004 	struct msghdr msg;
1005 	struct cmsghdr *cmsg;
1006 	char cbuf[CMSG_SPACE(sizeof(type))];
1007 	struct iovec iov;
1008 
1009 	memset(&msg, 0, sizeof(msg));
1010 
1011 	msg.msg_control = cbuf;
1012 	msg.msg_controllen = sizeof(cbuf);
1013 	cmsg = CMSG_FIRSTHDR(&msg);
1014 	cmsg->cmsg_level = IPPROTO_TCP;
1015 	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
1016 	cmsg->cmsg_len = CMSG_LEN(sizeof(type));
1017 	*(uint8_t *)CMSG_DATA(cmsg) = type;
1018 
1019 	iov.iov_base = data;
1020 	iov.iov_len = len;
1021 	msg.msg_iov = &iov;
1022 	msg.msg_iovlen = 1;
1023 
1024 	ATF_REQUIRE(sendmsg(fd, &msg, 0) == (ssize_t)len);
1025 }
1026 
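/*
 * Send a single control record of the given type and verify its
 * encryption by decrypting the record in userland.
 */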
1027 static void
1028 test_ktls_transmit_control(struct tls_enable *en, uint64_t seqno, uint8_t type,
1029     size_t len)
1030 {
1031 	struct tls_record_layer *hdr;
1032 	char *plaintext, *decrypted, *outbuf;
1033 	size_t outbuf_cap, payload_len, record_len;
1034 	ssize_t rv;
1035 	int sockets[2];
1036 	uint8_t record_type;
1037 
1038 	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);
1039 
1040 	plaintext = alloc_buffer(len);
1041 	decrypted = malloc(len);
1042 	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
1043 	outbuf = malloc(outbuf_cap);
1044 	hdr = (struct tls_record_layer *)outbuf;
1045 
1046 	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");
1047 
1048 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
1049 	    sizeof(*en)) == 0);
1050 
1051 	fd_set_blocking(sockets[0]);
1052 	fd_set_blocking(sockets[1]);
1053 
1054 	ktls_send_control_message(sockets[1], type, plaintext, len);
1055 
1056 	/*
1057 	 * First read the header to determine how much additional data
1058 	 * to read.
1059 	 */
1060 	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
1061 	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
1062 	payload_len = ntohs(hdr->tls_length);
1063 	record_len = payload_len + sizeof(struct tls_record_layer);
1064 	ATF_REQUIRE(record_len <= outbuf_cap);
1065 	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
1066 	    payload_len);
1067 	ATF_REQUIRE(rv == (ssize_t)payload_len);
1068 
1069 	rv = decrypt_tls_record(en, seqno, outbuf, record_len, decrypted, len,
1070 	    &record_type);
1071 
1072 	ATF_REQUIRE_MSG((ssize_t)len == rv,
1073 	    "read %zd decrypted bytes, but wrote %zu", rv, len);
1074 	ATF_REQUIRE(record_type == type);
1075 
1076 	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);
1077 
1078 	free(outbuf);
1079 	free(decrypted);
1080 	free(plaintext);
1081 
1082 	ATF_REQUIRE(close(sockets[1]) == 0);
1083 	ATF_REQUIRE(close(sockets[0]) == 0);
1084 }
1085 
1086 static void
1087 test_ktls_transmit_empty_fragment(struct tls_enable *en, uint64_t seqno)
1088 {
1089 	struct tls_record_layer *hdr;
1090 	char *outbuf;
1091 	size_t outbuf_cap, payload_len, record_len;
1092 	ssize_t rv;
1093 	int sockets[2];
1094 	uint8_t record_type;
1095 
1096 	outbuf_cap = tls_header_len(en) + tls_trailer_len(en);
1097 	outbuf = malloc(outbuf_cap);
1098 	hdr = (struct tls_record_layer *)outbuf;
1099 
1100 	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");
1101 
1102 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
1103 	    sizeof(*en)) == 0);
1104 
1105 	fd_set_blocking(sockets[0]);
1106 	fd_set_blocking(sockets[1]);
1107 
1108 	/*
1109 	 * A write of zero bytes should send an empty fragment only for
1110 	 * TLS 1.0, otherwise an error should be raised.
1111 	 */
1112 	rv = write(sockets[1], NULL, 0);
1113 	if (rv == 0) {
1114 		ATF_REQUIRE(en->cipher_algorithm == CRYPTO_AES_CBC);
1115 		ATF_REQUIRE(en->tls_vminor == TLS_MINOR_VER_ZERO);
1116 	} else {
1117 		ATF_REQUIRE(rv == -1);
1118 		ATF_REQUIRE(errno == EINVAL);
1119 		goto out;
1120 	}
1121 
1122 	/*
1123 	 * First read the header to determine how much additional data
1124 	 * to read.
1125 	 */
1126 	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
1127 	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
1128 	payload_len = ntohs(hdr->tls_length);
1129 	record_len = payload_len + sizeof(struct tls_record_layer);
1130 	ATF_REQUIRE(record_len <= outbuf_cap);
1131 	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
1132 	    payload_len);
1133 	ATF_REQUIRE(rv == (ssize_t)payload_len);
1134 
1135 	rv = decrypt_tls_record(en, seqno, outbuf, record_len, NULL, 0,
1136 	    &record_type);
1137 
1138 	ATF_REQUIRE_MSG(rv == 0,
1139 	    "read %zd decrypted bytes for an empty fragment", rv);
1140 	ATF_REQUIRE(record_type == TLS_RLTYPE_APP);
1141 
1142 out:
1143 	free(outbuf);
1144 
1145 	ATF_REQUIRE(close(sockets[1]) == 0);
1146 	ATF_REQUIRE(close(sockets[0]) == 0);
1147 }
1148 
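/*
 * Receive a single decrypted TLS record via recvmsg() and validate the
 * accompanying TLS_GET_RECORD control message.
 */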
1149 static size_t
1150 ktls_receive_tls_record(struct tls_enable *en, int fd, uint8_t record_type,
1151     void *data, size_t len)
1152 {
1153 	struct msghdr msg;
1154 	struct cmsghdr *cmsg;
1155 	struct tls_get_record *tgr;
1156 	char cbuf[CMSG_SPACE(sizeof(*tgr))];
1157 	struct iovec iov;
1158 	ssize_t rv;
1159 
1160 	memset(&msg, 0, sizeof(msg));
1161 
1162 	msg.msg_control = cbuf;
1163 	msg.msg_controllen = sizeof(cbuf);
1164 
1165 	iov.iov_base = data;
1166 	iov.iov_len = len;
1167 	msg.msg_iov = &iov;
1168 	msg.msg_iovlen = 1;
1169 
1170 	ATF_REQUIRE((rv = recvmsg(fd, &msg, 0)) > 0);
1171 
1172 	ATF_REQUIRE((msg.msg_flags & (MSG_EOR | MSG_CTRUNC)) == MSG_EOR);
1173 
1174 	cmsg = CMSG_FIRSTHDR(&msg);
1175 	ATF_REQUIRE(cmsg != NULL);
1176 	ATF_REQUIRE(cmsg->cmsg_level == IPPROTO_TCP);
1177 	ATF_REQUIRE(cmsg->cmsg_type == TLS_GET_RECORD);
1178 	ATF_REQUIRE(cmsg->cmsg_len == CMSG_LEN(sizeof(*tgr)));
1179 
1180 	tgr = (struct tls_get_record *)CMSG_DATA(cmsg);
1181 	ATF_REQUIRE(tgr->tls_type == record_type);
1182 	ATF_REQUIRE(tgr->tls_vmajor == en->tls_vmajor);
1183 	/* XXX: Not sure if this is what OpenSSL expects? */
1184 	if (en->tls_vminor == TLS_MINOR_VER_THREE)
1185 		ATF_REQUIRE(tgr->tls_vminor == TLS_MINOR_VER_TWO);
1186 	else
1187 		ATF_REQUIRE(tgr->tls_vminor == en->tls_vminor);
1188 	ATF_REQUIRE(tgr->tls_length == htons(rv));
1189 
1190 	return (rv);
1191 }
1192 
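/*
 * Encrypt TLS records in userland, write them to a socket with KTLS
 * receive enabled, and verify that the application data read back from
 * the socket matches the original plaintext.
 */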
1193 static void
1194 test_ktls_receive_app_data(struct tls_enable *en, uint64_t seqno, size_t len,
1195     size_t padding)
1196 {
1197 	struct kevent ev;
1198 	char *plaintext, *received, *outbuf;
1199 	size_t outbuf_cap, outbuf_len, outbuf_sent, received_len, todo, written;
1200 	ssize_t rv;
1201 	int kq, sockets[2];
1202 
1203 	plaintext = alloc_buffer(len);
1204 	received = malloc(len);
1205 	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
1206 	    tls_trailer_len(en);
1207 	outbuf = malloc(outbuf_cap);
1208 
1209 	ATF_REQUIRE((kq = kqueue()) != -1);
1210 
1211 	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");
1212 
1213 	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1214 	    sizeof(*en)) == 0);
1215 
1216 	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
1217 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
1218 	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
1219 	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
1220 
1221 	received_len = 0;
1222 	outbuf_len = 0;
1223 	written = 0;
1224 
1225 	while (received_len != len) {
1226 		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);
1227 
1228 		switch (ev.filter) {
1229 		case EVFILT_WRITE:
1230 			/*
1231 			 * Compose the next TLS record to send.
1232 			 */
1233 			if (outbuf_len == 0) {
1234 				ATF_REQUIRE(written < len);
1235 				todo = len - written;
1236 				if (todo > TLS_MAX_MSG_SIZE_V10_2 - padding)
1237 					todo = TLS_MAX_MSG_SIZE_V10_2 - padding;
1238 				outbuf_len = encrypt_tls_record(en,
1239 				    TLS_RLTYPE_APP, seqno, plaintext + written,
1240 				    todo, outbuf, outbuf_cap, padding);
1241 				outbuf_sent = 0;
1242 				written += todo;
1243 				seqno++;
1244 			}
1245 
1246 			/*
1247 			 * Try to write the remainder of the current
1248 			 * TLS record.
1249 			 */
1250 			rv = write(ev.ident, outbuf + outbuf_sent,
1251 			    outbuf_len - outbuf_sent);
1252 			ATF_REQUIRE_MSG(rv > 0,
1253 			    "failed to write to socket");
1254 			outbuf_sent += rv;
1255 			if (outbuf_sent == outbuf_len) {
1256 				outbuf_len = 0;
1257 				if (written == len) {
1258 					ev.flags = EV_DISABLE;
1259 					ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
1260 					    NULL) == 0);
1261 				}
1262 			}
1263 			break;
1264 
1265 		case EVFILT_READ:
1266 			ATF_REQUIRE((ev.flags & EV_EOF) == 0);
1267 
1268 			rv = ktls_receive_tls_record(en, ev.ident,
1269 			    TLS_RLTYPE_APP, received + received_len,
1270 			    len - received_len);
1271 			received_len += rv;
1272 			break;
1273 		}
1274 	}
1275 
1276 	ATF_REQUIRE_MSG(written == received_len,
1277 	    "read %zu decrypted bytes, but wrote %zu", received_len, written);
1278 
1279 	ATF_REQUIRE(memcmp(plaintext, received, len) == 0);
1280 
1281 	free(outbuf);
1282 	free(received);
1283 	free(plaintext);
1284 
1285 	ATF_REQUIRE(close(sockets[1]) == 0);
1286 	ATF_REQUIRE(close(sockets[0]) == 0);
1287 	ATF_REQUIRE(close(kq) == 0);
1288 }
1289 
1290 #define	TLS_10_TESTS(M)							\
1291 	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8,			\
1292 	    CRYPTO_SHA1_HMAC)						\
1293 	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8,			\
1294 	    CRYPTO_SHA1_HMAC)
1295 
1296 #define	TLS_13_TESTS(M)							\
1297 	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
1298 	    TLS_MINOR_VER_THREE)					\
1299 	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
1300 	    TLS_MINOR_VER_THREE)					\
1301 	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
1302 	    TLS_MINOR_VER_THREE)
1303 
1304 #define	AES_CBC_TESTS(M)						\
1305 	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8,			\
1306 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO)			\
1307 	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8,			\
1308 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO)			\
1309 	M(aes128_cbc_1_1_sha1, CRYPTO_AES_CBC, 128 / 8,			\
1310 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE)			\
1311 	M(aes256_cbc_1_1_sha1, CRYPTO_AES_CBC, 256 / 8,			\
1312 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE)			\
1313 	M(aes128_cbc_1_2_sha1, CRYPTO_AES_CBC, 128 / 8,			\
1314 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO)			\
1315 	M(aes256_cbc_1_2_sha1, CRYPTO_AES_CBC, 256 / 8,			\
1316 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO)			\
1317 	M(aes128_cbc_1_2_sha256, CRYPTO_AES_CBC, 128 / 8,		\
1318 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO)			\
1319 	M(aes256_cbc_1_2_sha256, CRYPTO_AES_CBC, 256 / 8,		\
1320 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO)			\
1321 	M(aes128_cbc_1_2_sha384, CRYPTO_AES_CBC, 128 / 8,		\
1322 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO)			\
1323 	M(aes256_cbc_1_2_sha384, CRYPTO_AES_CBC, 256 / 8,		\
1324 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO)			\
1325 
1326 #define AES_GCM_TESTS(M)						\
1327 	M(aes128_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
1328 	    TLS_MINOR_VER_TWO)						\
1329 	M(aes256_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
1330 	    TLS_MINOR_VER_TWO)						\
1331 	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
1332 	    TLS_MINOR_VER_THREE)					\
1333 	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
1334 	    TLS_MINOR_VER_THREE)
1335 
1336 #define CHACHA20_TESTS(M)						\
1337 	M(chacha20_poly1305_1_2, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
1338 	    TLS_MINOR_VER_TWO)						\
1339 	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
1340 	    TLS_MINOR_VER_THREE)
1341 
1342 #define GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1343 	    auth_alg, minor, name, len)					\
1344 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name);		\
1345 ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc)			\
1346 {									\
1347 	struct tls_enable en;						\
1348 	uint64_t seqno;							\
1349 									\
1350 	ATF_REQUIRE_KTLS();						\
1351 	seqno = random();						\
1352 	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno,	\
1353 	    &en);							\
1354 	test_ktls_transmit_app_data(&en, seqno, len);			\
1355 	free_tls_enable(&en);						\
1356 }
1357 
1358 #define ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1359 	    auth_alg, minor, name)					\
1360 	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);
1361 
1362 #define GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1363 	    auth_alg, minor, name, type, len)				\
1364 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name);		\
1365 ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc)			\
1366 {									\
1367 	struct tls_enable en;						\
1368 	uint64_t seqno;							\
1369 									\
1370 	ATF_REQUIRE_KTLS();						\
1371 	seqno = random();						\
1372 	build_tls_enable(cipher_alg, key_size, auth_alg, minor,	seqno,	\
1373 	    &en);							\
1374 	test_ktls_transmit_control(&en, seqno, type, len);		\
1375 	free_tls_enable(&en);						\
1376 }
1377 
1378 #define ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1379 	    auth_alg, minor, name)					\
1380 	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);
1381 
1382 #define GEN_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg,	\
1383 	    key_size, auth_alg, minor)					\
1384 ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_empty_fragment);	\
1385 ATF_TC_BODY(ktls_transmit_##cipher_name##_empty_fragment, tc)		\
1386 {									\
1387 	struct tls_enable en;						\
1388 	uint64_t seqno;							\
1389 									\
1390 	ATF_REQUIRE_KTLS();						\
1391 	seqno = random();						\
1392 	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno,	\
1393 	    &en);							\
1394 	test_ktls_transmit_empty_fragment(&en, seqno);			\
1395 	free_tls_enable(&en);						\
1396 }
1397 
1398 #define ADD_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg,	\
1399 	    key_size, auth_alg, minor)					\
1400 	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_empty_fragment);
1401 
1402 #define GEN_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
1403 	    minor)							\
1404 	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1405 	    auth_alg, minor, short, 64)					\
1406 	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1407 	    auth_alg, minor, long, 64 * 1024)				\
1408 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1409 	    auth_alg, minor, control, 0x21 /* Alert */, 32)
1410 
1411 #define ADD_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
1412 	    minor)							\
1413 	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1414 	    auth_alg, minor, short)					\
1415 	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1416 	    auth_alg, minor, long)					\
1417 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1418 	    auth_alg, minor, control)
1419 
1420 /*
1421  * For each supported cipher suite, run three transmit tests:
1422  *
1423  * - a short test which sends 64 bytes of application data (likely as
1424  *   a single TLS record)
1425  *
1426  * - a long test which sends 64KB of application data (split across
1427  *   multiple TLS records)
1428  *
1429  * - a control test which sends a single record with a specific
1430  *   content type via sendmsg()
1431  */
1432 AES_CBC_TESTS(GEN_TRANSMIT_TESTS);
1433 AES_GCM_TESTS(GEN_TRANSMIT_TESTS);
1434 CHACHA20_TESTS(GEN_TRANSMIT_TESTS);
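
/*
 * For example, the aes128_cbc_1_0_sha1 entry above expands into test
 * cases named ktls_transmit_aes128_cbc_1_0_sha1_short,
 * ktls_transmit_aes128_cbc_1_0_sha1_long, and
 * ktls_transmit_aes128_cbc_1_0_sha1_control.
 */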
1435 
1436 #define GEN_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size,	\
1437 	    auth_alg, minor)						\
1438 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1439 	    auth_alg, minor, padding_1, 0x21 /* Alert */, 1)		\
1440 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1441 	    auth_alg, minor, padding_2, 0x21 /* Alert */, 2)		\
1442 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1443 	    auth_alg, minor, padding_3, 0x21 /* Alert */, 3)		\
1444 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1445 	    auth_alg, minor, padding_4, 0x21 /* Alert */, 4)		\
1446 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1447 	    auth_alg, minor, padding_5, 0x21 /* Alert */, 5)		\
1448 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1449 	    auth_alg, minor, padding_6, 0x21 /* Alert */, 6)		\
1450 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1451 	    auth_alg, minor, padding_7, 0x21 /* Alert */, 7)		\
1452 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1453 	    auth_alg, minor, padding_8, 0x21 /* Alert */, 8)		\
1454 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1455 	    auth_alg, minor, padding_9, 0x21 /* Alert */, 9)		\
1456 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1457 	    auth_alg, minor, padding_10, 0x21 /* Alert */, 10)		\
1458 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1459 	    auth_alg, minor, padding_11, 0x21 /* Alert */, 11)		\
1460 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1461 	    auth_alg, minor, padding_12, 0x21 /* Alert */, 12)		\
1462 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1463 	    auth_alg, minor, padding_13, 0x21 /* Alert */, 13)		\
1464 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1465 	    auth_alg, minor, padding_14, 0x21 /* Alert */, 14)		\
1466 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1467 	    auth_alg, minor, padding_15, 0x21 /* Alert */, 15)		\
1468 	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1469 	    auth_alg, minor, padding_16, 0x21 /* Alert */, 16)
1470 
1471 #define ADD_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size,	\
1472 	    auth_alg, minor)						\
1473 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1474 	    auth_alg, minor, padding_1)					\
1475 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1476 	    auth_alg, minor, padding_2)					\
1477 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1478 	    auth_alg, minor, padding_3)					\
1479 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1480 	    auth_alg, minor, padding_4)					\
1481 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1482 	    auth_alg, minor, padding_5)					\
1483 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1484 	    auth_alg, minor, padding_6)					\
1485 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1486 	    auth_alg, minor, padding_7)					\
1487 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1488 	    auth_alg, minor, padding_8)					\
1489 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1490 	    auth_alg, minor, padding_9)					\
1491 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1492 	    auth_alg, minor, padding_10)				\
1493 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1494 	    auth_alg, minor, padding_11)				\
1495 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1496 	    auth_alg, minor, padding_12)				\
1497 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1498 	    auth_alg, minor, padding_13)				\
1499 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1500 	    auth_alg, minor, padding_14)				\
1501 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1502 	    auth_alg, minor, padding_15)				\
1503 	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
1504 	    auth_alg, minor, padding_16)
1505 
1506 /*
1507  * For AES-CBC MTE cipher suites using padding, add tests of messages
1508  * with each possible padding size.  Note that the padding_<N> tests
1509  * do not necessarily test <N> bytes of padding as the padding is a
1510  * function of the cipher suite's MAC length.  However, cycling
1511  * through all of the payload sizes from 1 to 16 should exercise all
1512  * of the possible padding lengths for each suite.
1513  */
1514 AES_CBC_TESTS(GEN_TRANSMIT_PADDING_TESTS);
1515 
1516 /*
1517  * Test "empty fragments" which are TLS records with no payload that
1518  * OpenSSL can send for TLS 1.0 connections.
1519  */
1520 AES_CBC_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
1521 AES_GCM_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
1522 CHACHA20_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
1523 
1524 static void
1525 test_ktls_invalid_transmit_cipher_suite(struct tls_enable *en)
1526 {
1527 	int sockets[2];
1528 
1529 	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");
1530 
1531 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
1532 	    sizeof(*en)) == -1);
1533 	ATF_REQUIRE(errno == EINVAL);
1534 
1535 	ATF_REQUIRE(close(sockets[1]) == 0);
1536 	ATF_REQUIRE(close(sockets[0]) == 0);
1537 }
1538 
1539 #define GEN_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg,	\
1540 	    minor)							\
1541 ATF_TC_WITHOUT_HEAD(ktls_transmit_invalid_##name);			\
1542 ATF_TC_BODY(ktls_transmit_invalid_##name, tc)				\
1543 {									\
1544 	struct tls_enable en;						\
1545 	uint64_t seqno;							\
1546 									\
1547 	ATF_REQUIRE_KTLS();						\
1548 	seqno = random();						\
1549 	build_tls_enable(cipher_alg, key_size, auth_alg, minor,	seqno,	\
1550 	    &en);							\
1551 	test_ktls_invalid_transmit_cipher_suite(&en);			\
1552 	free_tls_enable(&en);						\
1553 }
1554 
1555 #define ADD_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \
1556 	    minor)							\
1557 	ATF_TP_ADD_TC(tp, ktls_transmit_invalid_##name);
1558 
1559 #define	INVALID_CIPHER_SUITES(M)					\
1560 	M(aes128_cbc_1_0_sha256, CRYPTO_AES_CBC, 128 / 8,		\
1561 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ZERO)			\
1562 	M(aes128_cbc_1_0_sha384, CRYPTO_AES_CBC, 128 / 8,		\
1563 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ZERO)			\
1564 	M(aes128_gcm_1_0, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
1565 	    TLS_MINOR_VER_ZERO)						\
1566 	M(chacha20_poly1305_1_0, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
1567 	    TLS_MINOR_VER_ZERO)						\
1568 	M(aes128_cbc_1_1_sha256, CRYPTO_AES_CBC, 128 / 8,		\
1569 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ONE)			\
1570 	M(aes128_cbc_1_1_sha384, CRYPTO_AES_CBC, 128 / 8,		\
1571 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ONE)			\
1572 	M(aes128_gcm_1_1, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
1573 	    TLS_MINOR_VER_ONE)						\
1574 	M(chacha20_poly1305_1_1, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
1575 	    TLS_MINOR_VER_ONE)						\
1576 	M(aes128_cbc_1_3_sha1, CRYPTO_AES_CBC, 128 / 8,			\
1577 	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_THREE)			\
1578 	M(aes128_cbc_1_3_sha256, CRYPTO_AES_CBC, 128 / 8,		\
1579 	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_THREE)			\
1580 	M(aes128_cbc_1_3_sha384, CRYPTO_AES_CBC, 128 / 8,		\
1581 	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_THREE)
1582 
1583 /*
1584  * Ensure that invalid cipher suites are rejected for transmit.
1585  */
1586 INVALID_CIPHER_SUITES(GEN_INVALID_TRANSMIT_TEST);
1587 
1588 #define GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1589 	    auth_alg, minor, name, len, padding)			\
1590 ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name);		\
1591 ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc)			\
1592 {									\
1593 	struct tls_enable en;						\
1594 	uint64_t seqno;							\
1595 									\
1596 	ATF_REQUIRE_KTLS();						\
1597 	seqno = random();						\
1598 	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno,	\
1599 	    &en);							\
1600 	test_ktls_receive_app_data(&en, seqno, len, padding);		\
1601 	free_tls_enable(&en);						\
1602 }
1603 
1604 #define ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1605 	    auth_alg, minor, name)					\
1606 	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name);
1607 
1608 #define GEN_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
1609 	    minor)							\
1610 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1611 	    auth_alg, minor, short, 64, 0)				\
1612 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1613 	    auth_alg, minor, long, 64 * 1024, 0)
1614 
1615 #define ADD_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
1616 	    minor)							\
1617 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1618 	    auth_alg, minor, short)					\
1619 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1620 	    auth_alg, minor, long)
1621 
1622 /*
1623  * For each supported cipher suite, run two receive tests:
1624  *
1625  * - a short test which sends 64 bytes of application data (likely as
1626  *   a single TLS record)
1627  *
1628  * - a long test which sends 64KB of application data (split across
1629  *   multiple TLS records)
1630  *
1631  * Note that receive is currently only supported for TLS 1.2 and 1.3
1632  * AEAD cipher suites.
1633  */
1634 AES_GCM_TESTS(GEN_RECEIVE_TESTS);
1635 CHACHA20_TESTS(GEN_RECEIVE_TESTS);
1636 
1637 #define GEN_PADDING_RECEIVE_TESTS(cipher_name, cipher_alg, key_size,	\
1638 	    auth_alg, minor)						\
1639 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1640 	    auth_alg, minor, short_padded, 64, 16)			\
1641 	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1642 	    auth_alg, minor, long_padded, 64 * 1024, 15)
1643 
1644 #define ADD_PADDING_RECEIVE_TESTS(cipher_name, cipher_alg, key_size,	\
1645 	    auth_alg, minor)						\
1646 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1647 	    auth_alg, minor, short_padded)				\
1648 	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
1649 	    auth_alg, minor, long_padded)
1650 
1651 /*
1652  * For TLS 1.3 cipher suites, run two additional receive tests which
1653  * add padding to each record.
1654  */
1655 TLS_13_TESTS(GEN_PADDING_RECEIVE_TESTS);
1656 
1657 static void
1658 test_ktls_invalid_receive_cipher_suite(struct tls_enable *en)
1659 {
1660 	int sockets[2];
1661 
1662 	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");
1663 
1664 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1665 	    sizeof(*en)) == -1);
1666 	ATF_REQUIRE(errno == EINVAL);
1667 
1668 	ATF_REQUIRE(close(sockets[1]) == 0);
1669 	ATF_REQUIRE(close(sockets[0]) == 0);
1670 }
1671 
1672 #define GEN_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg,	\
1673 	    minor)							\
1674 ATF_TC_WITHOUT_HEAD(ktls_receive_invalid_##name);			\
1675 ATF_TC_BODY(ktls_receive_invalid_##name, tc)				\
1676 {									\
1677 	struct tls_enable en;						\
1678 	uint64_t seqno;							\
1679 									\
1680 	ATF_REQUIRE_KTLS();						\
1681 	seqno = random();						\
1682 	build_tls_enable(cipher_alg, key_size, auth_alg, minor,	seqno,	\
1683 	    &en);							\
1684 	test_ktls_invalid_receive_cipher_suite(&en);			\
1685 	free_tls_enable(&en);						\
1686 }
1687 
1688 #define ADD_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg,	\
1689 	    minor)							\
1690 	ATF_TP_ADD_TC(tp, ktls_receive_invalid_##name);
1691 
1692 /*
1693  * Ensure that invalid cipher suites are rejected for receive.
1694  */
1695 INVALID_CIPHER_SUITES(GEN_INVALID_RECEIVE_TEST);
1696 
1697 static void
1698 test_ktls_unsupported_receive_cipher_suite(struct tls_enable *en)
1699 {
1700 	int sockets[2];
1701 
1702 	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");
1703 
1704 	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
1705 	    sizeof(*en)) == -1);
1706 	ATF_REQUIRE(errno == EPROTONOSUPPORT);
1707 
1708 	ATF_REQUIRE(close(sockets[1]) == 0);
1709 	ATF_REQUIRE(close(sockets[0]) == 0);
1710 }
1711 
1712 #define GEN_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size,	\
1713 	    auth_alg, minor)						\
1714 ATF_TC_WITHOUT_HEAD(ktls_receive_unsupported_##name);			\
1715 ATF_TC_BODY(ktls_receive_unsupported_##name, tc)			\
1716 {									\
1717 	struct tls_enable en;						\
1718 	uint64_t seqno;							\
1719 									\
1720 	ATF_REQUIRE_KTLS();						\
1721 	seqno = random();						\
1722 	build_tls_enable(cipher_alg, key_size, auth_alg, minor,	seqno,	\
1723 	    &en);							\
1724 	test_ktls_unsupported_receive_cipher_suite(&en);		\
1725 	free_tls_enable(&en);						\
1726 }
1727 
1728 #define ADD_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size,	\
1729 	    auth_alg, minor)						\
1730 	ATF_TP_ADD_TC(tp, ktls_receive_unsupported_##name);
1731 
1732 /*
1733  * Ensure that valid cipher suites not supported for receive are
1734  * rejected.
1735  */
1736 AES_CBC_TESTS(GEN_UNSUPPORTED_RECEIVE_TEST);
1737 
1738 /*
1739  * Try to perform an invalid sendto(2) on a TXTLS-enabled socket, to exercise
1740  * KTLS error handling in the socket layer.
1741  */
1742 ATF_TC_WITHOUT_HEAD(ktls_sendto_baddst);
1743 ATF_TC_BODY(ktls_sendto_baddst, tc)
1744 {
1745 	char buf[32];
1746 	struct sockaddr_in dst;
1747 	struct tls_enable en;
1748 	ssize_t n;
1749 	int s;
1750 
1751 	ATF_REQUIRE_KTLS();
1752 
1753 	s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
1754 	ATF_REQUIRE(s >= 0);
1755 
1756 	build_tls_enable(CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,
1757 	    TLS_MINOR_VER_THREE, (uint64_t)random(), &en);
1758 
1759 	ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en,
1760 	    sizeof(en)) == 0);
1761 
1762 	memset(&dst, 0, sizeof(dst));
1763 	dst.sin_family = AF_INET;
1764 	dst.sin_len = sizeof(dst);
1765 	dst.sin_addr.s_addr = htonl(INADDR_BROADCAST);
1766 	dst.sin_port = htons(12345);
1767 
1768 	memset(buf, 0, sizeof(buf));
1769 	n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&dst,
1770 	    sizeof(dst));
1771 
1772 	/* Can't transmit to the broadcast address over TCP. */
	ATF_REQUIRE_ERRNO(EACCES, n == -1);
	free_tls_enable(&en);
	ATF_REQUIRE(close(s) == 0);
1775 }
1776 
1777 ATF_TP_ADD_TCS(tp)
1778 {
1779 	/* Transmit tests */
1780 	AES_CBC_TESTS(ADD_TRANSMIT_TESTS);
1781 	AES_GCM_TESTS(ADD_TRANSMIT_TESTS);
1782 	CHACHA20_TESTS(ADD_TRANSMIT_TESTS);
1783 	AES_CBC_TESTS(ADD_TRANSMIT_PADDING_TESTS);
1784 	AES_CBC_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
1785 	AES_GCM_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
1786 	CHACHA20_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
1787 	INVALID_CIPHER_SUITES(ADD_INVALID_TRANSMIT_TEST);
1788 
1789 	/* Receive tests */
1790 	AES_CBC_TESTS(ADD_UNSUPPORTED_RECEIVE_TEST);
1791 	AES_GCM_TESTS(ADD_RECEIVE_TESTS);
1792 	CHACHA20_TESTS(ADD_RECEIVE_TESTS);
1793 	TLS_13_TESTS(ADD_PADDING_RECEIVE_TESTS);
1794 	INVALID_CIPHER_SUITES(ADD_INVALID_RECEIVE_TEST);
1795 
1796 	/* Miscellaneous */
1797 	ATF_TP_ADD_TC(tp, ktls_sendto_baddst);
1798 
1799 	return (atf_no_error());
1800 }
1801