// SPDX-License-Identifier: GPL-2.0-or-later
/* GSSAPI-based RxRPC security
 *
 * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/key-type.h>
#include "ar-internal.h"
#include "rxgk_common.h"

/*
 * Parse the information from a server key
 */
static int rxgk_preparse_server_key(struct key_preparsed_payload *prep)
{
	const struct krb5_enctype *krb5;
	struct krb5_buffer *server_key = (void *)&prep->payload.data[2];
	unsigned int service, sec_class, kvno, enctype;
	int n = 0;

	_enter("%zu", prep->datalen);

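	/* The key description takes the form "<service>:<sec_class>:<kvno>:<enctype>". */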
	if (sscanf(prep->orig_description, "%u:%u:%u:%u%n",
		   &service, &sec_class, &kvno, &enctype, &n) != 4)
		return -EINVAL;

	if (prep->orig_description[n])
		return -EINVAL;

	krb5 = crypto_krb5_find_enctype(enctype);
	if (!krb5)
		return -ENOPKG;

	prep->payload.data[0] = (struct krb5_enctype *)krb5;

	if (prep->datalen != krb5->key_len)
		return -EKEYREJECTED;

	server_key->len = prep->datalen;
	server_key->data = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
	if (!server_key->data)
		return -ENOMEM;

	_leave(" = 0");
	return 0;
}

static void rxgk_free_server_key(union key_payload *payload)
{
	struct krb5_buffer *server_key = (void *)&payload->data[2];

	kfree_sensitive(server_key->data);
}

static void rxgk_free_preparse_server_key(struct key_preparsed_payload *prep)
{
	rxgk_free_server_key(&prep->payload);
}

static void rxgk_destroy_server_key(struct key *key)
{
	rxgk_free_server_key(&key->payload);
}

static void rxgk_describe_server_key(const struct key *key, struct seq_file *m)
{
	const struct krb5_enctype *krb5 = key->payload.data[0];

	if (krb5)
		seq_printf(m, ": %s", krb5->name);
}

/*
 * Handle rekeying the connection when we see our limits overrun or when the
 * far side decided to rekey.
 *
 * Returns a ref on the context if successful or -ESTALE if the key is out of
 * date.
 */
static struct rxgk_context *rxgk_rekey(struct rxrpc_connection *conn,
				       const u16 *specific_key_number)
{
	struct rxgk_context *gk, *dead = NULL;
	unsigned int key_number, current_key, mask = ARRAY_SIZE(conn->rxgk.keys) - 1;
	bool crank = false;

	_enter("%d", specific_key_number ? *specific_key_number : -1);

	mutex_lock(&conn->security_lock);

	current_key = conn->rxgk.key_number;
	if (!specific_key_number) {
		key_number = current_key;
	} else {
		if (*specific_key_number == (u16)current_key)
			key_number = current_key;
		else if (*specific_key_number == (u16)(current_key - 1))
			key_number = current_key - 1;
		else if (*specific_key_number == (u16)(current_key + 1))
			goto crank_window;
		else
			goto bad_key;
	}

	gk = conn->rxgk.keys[key_number & mask];
	if (!gk)
		goto generate_key;
	if (!specific_key_number &&
	    test_bit(RXGK_TK_NEEDS_REKEY, &gk->flags))
		goto crank_window;

grab:
	refcount_inc(&gk->usage);
	mutex_unlock(&conn->security_lock);
	rxgk_put(dead);
	return gk;

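/* Either the peer has already moved on to the next key or we've hit our own
 * usage limits, so advance the key window by one: derive a new transport key
 * and retire the key two steps back.
 */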
crank_window:
	trace_rxrpc_rxgk_rekey(conn, current_key,
			       specific_key_number ? *specific_key_number : -1);
	if (current_key == UINT_MAX)
		goto bad_key;
	if (current_key + 1 == UINT_MAX)
		set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);

	key_number = current_key + 1;
	if (WARN_ON(conn->rxgk.keys[key_number & mask]))
		goto bad_key;
	crank = true;

generate_key:
	gk = conn->rxgk.keys[current_key & mask];
	gk = rxgk_generate_transport_key(conn, gk->key, key_number, GFP_NOFS);
	if (IS_ERR(gk)) {
		mutex_unlock(&conn->security_lock);
		return gk;
	}

	write_lock(&conn->security_use_lock);
	if (crank) {
		current_key++;
		conn->rxgk.key_number = current_key;
		dead = conn->rxgk.keys[(current_key - 2) & mask];
		conn->rxgk.keys[(current_key - 2) & mask] = NULL;
	}
	conn->rxgk.keys[current_key & mask] = gk;
	write_unlock(&conn->security_use_lock);
	goto grab;

bad_key:
	mutex_unlock(&conn->security_lock);
	return ERR_PTR(-ESTALE);
}

/*
 * Get the specified keying context.
 *
 * Returns a ref on the context if successful or -ESTALE if the key is out of
 * date.
 */
static struct rxgk_context *rxgk_get_key(struct rxrpc_connection *conn,
					 const u16 *specific_key_number)
{
	struct rxgk_context *gk;
	unsigned int key_number, current_key, mask = ARRAY_SIZE(conn->rxgk.keys) - 1;

	_enter("{%u},%d",
	       conn->rxgk.key_number, specific_key_number ? *specific_key_number : -1);

	read_lock(&conn->security_use_lock);

	current_key = conn->rxgk.key_number;
	if (!specific_key_number) {
		key_number = current_key;
	} else {
		/* Only the bottom 16 bits of the key number are exposed in the
		 * header, so we try and keep the upper 16 bits in step.  The
		 * whole 32 bits are used to generate the TK.
		 */
		if (*specific_key_number == (u16)current_key)
			key_number = current_key;
		else if (*specific_key_number == (u16)(current_key - 1))
			key_number = current_key - 1;
		else if (*specific_key_number == (u16)(current_key + 1))
			goto rekey;
		else
			goto bad_key;
	}

	gk = conn->rxgk.keys[key_number & mask];
	if (!gk)
		goto slow_path;
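	/* If we're using the latest key (rather than an explicitly numbered
	 * old one), check whether it has expired or run out of its byte
	 * budget; if so, mark it for rekeying and take the slow path.
	 */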
	if (!specific_key_number &&
	    key_number < UINT_MAX) {
		if (time_after(jiffies, gk->expiry) ||
		    gk->bytes_remaining < 0) {
			set_bit(RXGK_TK_NEEDS_REKEY, &gk->flags);
			goto slow_path;
		}

		if (test_bit(RXGK_TK_NEEDS_REKEY, &gk->flags))
			goto slow_path;
	}

	refcount_inc(&gk->usage);
	read_unlock(&conn->security_use_lock);
	return gk;

rekey:
	_debug("rekey");
	if (current_key == UINT_MAX)
		goto bad_key;
	gk = conn->rxgk.keys[current_key & mask];
	if (gk)
		set_bit(RXGK_TK_NEEDS_REKEY, &gk->flags);
slow_path:
	read_unlock(&conn->security_use_lock);
	return rxgk_rekey(conn, specific_key_number);
bad_key:
	read_unlock(&conn->security_use_lock);
	return ERR_PTR(-ESTALE);
}

/*
 * initialise connection security
 */
static int rxgk_init_connection_security(struct rxrpc_connection *conn,
					 struct rxrpc_key_token *token)
{
	struct rxgk_context *gk;
	int ret;

	_enter("{%d,%u},{%x}",
	       conn->debug_id, conn->rxgk.key_number, key_serial(conn->key));

	conn->security_ix = token->security_index;
	conn->security_level = token->rxgk->level;

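	/* A client notes its own start time (ktime divided down to 100ns
	 * units); a server instead takes the start time from the RESPONSE
	 * packet.
	 */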
	if (rxrpc_conn_is_client(conn)) {
		conn->rxgk.start_time = ktime_get();
		do_div(conn->rxgk.start_time, 100);
	}

	gk = rxgk_generate_transport_key(conn, token->rxgk, conn->rxgk.key_number,
					 GFP_NOFS);
	if (IS_ERR(gk))
		return PTR_ERR(gk);
	conn->rxgk.enctype = gk->krb5->etype;
	conn->rxgk.keys[gk->key_number & 3] = gk;

	switch (conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
	case RXRPC_SECURITY_AUTH:
	case RXRPC_SECURITY_ENCRYPT:
		break;
	default:
		ret = -EKEYREJECTED;
		goto error;
	}

	ret = 0;
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * Clean up the crypto on a call.
 */
static void rxgk_free_call_crypto(struct rxrpc_call *call)
{
}

/*
 * Work out how much data we can put in a packet.
 */
static struct rxrpc_txbuf *rxgk_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
	enum krb5_crypto_mode mode;
	struct rxgk_context *gk;
	struct rxrpc_txbuf *txb;
	size_t shdr, alloc, limit, part, offset, gap;

	switch (call->conn->security_level) {
	default:
		alloc = umin(remain, RXRPC_JUMBO_DATALEN);
		return rxrpc_alloc_data_txbuf(call, alloc, 1, gfp);
	case RXRPC_SECURITY_AUTH:
		shdr = 0;
		mode = KRB5_CHECKSUM_MODE;
		break;
	case RXRPC_SECURITY_ENCRYPT:
		shdr = sizeof(struct rxgk_header);
		mode = KRB5_ENCRYPT_MODE;
		break;
	}

	gk = rxgk_get_key(call->conn, NULL);
	if (IS_ERR(gk))
		return NULL;

	/* Work out the maximum amount of data that will fit. */
	alloc = RXRPC_JUMBO_DATALEN;
	limit = crypto_krb5_how_much_data(gk->krb5, mode, &alloc, &offset);

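	/* If the remaining data fits within that limit, size the buffer to
	 * suit; otherwise use a full jumbo-sized buffer and note how much of
	 * its tail will go unused.
	 */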
	if (remain < limit - shdr) {
		part = remain;
		alloc = crypto_krb5_how_much_buffer(gk->krb5, mode,
						    shdr + part, &offset);
		gap = 0;
	} else {
		part = limit - shdr;
		gap = RXRPC_JUMBO_DATALEN - alloc;
		alloc = RXRPC_JUMBO_DATALEN;
	}

	rxgk_put(gk);

	txb = rxrpc_alloc_data_txbuf(call, alloc, 16, gfp);
	if (!txb)
		return NULL;

	txb->crypto_header	= offset;
	txb->sec_header		= shdr;
	txb->offset		+= offset + shdr;
	txb->space		= part;

	/* Clear excess space in the packet */
	if (gap)
		memset(txb->data + alloc - gap, 0, gap);
	return txb;
}

/*
 * Integrity mode (sign a packet - level 1 security)
 */
static int rxgk_secure_packet_integrity(const struct rxrpc_call *call,
					struct rxgk_context *gk,
					struct rxrpc_txbuf *txb)
{
	struct rxgk_header *hdr;
	struct scatterlist sg[1];
	struct krb5_buffer metadata;
	int ret = -ENOMEM;

	_enter("");

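	/* The rxgk_header built here is only used as associated data for the
	 * MIC; it isn't transmitted, as the receiver reconstructs the same
	 * values from the packet header.
	 */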
	hdr = kzalloc(sizeof(*hdr), GFP_NOFS);
	if (!hdr)
		goto error_gk;

	hdr->epoch	= htonl(call->conn->proto.epoch);
	hdr->cid	= htonl(call->cid);
	hdr->call_number = htonl(call->call_id);
	hdr->seq	= htonl(txb->seq);
	hdr->sec_index	= htonl(call->security_ix);
	hdr->data_len	= htonl(txb->len);
	metadata.len = sizeof(*hdr);
	metadata.data = hdr;

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], txb->data, txb->alloc_size);

	ret = crypto_krb5_get_mic(gk->krb5, gk->tx_Kc, &metadata,
				  sg, 1, txb->alloc_size,
				  txb->crypto_header, txb->sec_header + txb->len);
	if (ret >= 0) {
		txb->pkt_len = ret;
		if (txb->alloc_size == RXRPC_JUMBO_DATALEN)
			txb->jumboable = true;
		gk->bytes_remaining -= ret;
	}
	kfree(hdr);
error_gk:
	rxgk_put(gk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * wholly encrypt a packet (level 2 security)
 */
static int rxgk_secure_packet_encrypted(const struct rxrpc_call *call,
					struct rxgk_context *gk,
					struct rxrpc_txbuf *txb)
{
	struct rxgk_header *hdr;
	struct scatterlist sg[1];
	int ret;

	_enter("%x", txb->len);

	/* Insert the header into the buffer. */
	hdr = txb->data + txb->crypto_header;
	hdr->epoch	 = htonl(call->conn->proto.epoch);
	hdr->cid	 = htonl(call->cid);
	hdr->call_number = htonl(call->call_id);
	hdr->seq	 = htonl(txb->seq);
	hdr->sec_index	 = htonl(call->security_ix);
	hdr->data_len	 = htonl(txb->len);

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], txb->data, txb->alloc_size);

	ret = crypto_krb5_encrypt(gk->krb5, gk->tx_enc,
				  sg, 1, txb->alloc_size,
				  txb->crypto_header, txb->sec_header + txb->len,
				  false);
	if (ret >= 0) {
		txb->pkt_len = ret;
		if (txb->alloc_size == RXRPC_JUMBO_DATALEN)
			txb->jumboable = true;
		gk->bytes_remaining -= ret;
	}

	rxgk_put(gk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Apply security to a packet to be transmitted.
 */
static int rxgk_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	struct rxgk_context *gk;
	int ret;

	_enter("{%d{%x}},{#%u},%u,",
	       call->debug_id, key_serial(call->conn->key), txb->seq, txb->len);

	gk = rxgk_get_key(call->conn, NULL);
	if (IS_ERR(gk))
		return PTR_ERR(gk) == -ESTALE ? -EKEYREJECTED : PTR_ERR(gk);

	ret = key_validate(call->conn->key);
	if (ret < 0) {
		rxgk_put(gk);
		return ret;
	}

	call->security_enctype = gk->krb5->etype;
	txb->cksum = htons(gk->key_number);

	switch (call->conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
		rxgk_put(gk);
		txb->pkt_len = txb->len;
		return 0;
	case RXRPC_SECURITY_AUTH:
		return rxgk_secure_packet_integrity(call, gk, txb);
	case RXRPC_SECURITY_ENCRYPT:
		return rxgk_secure_packet_encrypted(call, gk, txb);
	default:
		rxgk_put(gk);
		return -EPERM;
	}
}

/*
 * Integrity mode (check the signature on a packet - level 1 security)
 */
static int rxgk_verify_packet_integrity(struct rxrpc_call *call,
					struct rxgk_context *gk,
					struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxgk_header *hdr;
	struct krb5_buffer metadata;
	unsigned int offset = sp->offset, len = sp->len;
	size_t data_offset = 0, data_len = len;
	u32 ac;
	int ret = -ENOMEM;

	_enter("");

	crypto_krb5_where_is_the_data(gk->krb5, KRB5_CHECKSUM_MODE,
				      &data_offset, &data_len);

	hdr = kzalloc(sizeof(*hdr), GFP_NOFS);
	if (!hdr)
		return -ENOMEM;

	hdr->epoch	= htonl(call->conn->proto.epoch);
	hdr->cid	= htonl(call->cid);
	hdr->call_number = htonl(call->call_id);
	hdr->seq	= htonl(sp->hdr.seq);
	hdr->sec_index	= htonl(call->security_ix);
	hdr->data_len	= htonl(data_len);

	metadata.len = sizeof(*hdr);
	metadata.data = hdr;
	ret = rxgk_verify_mic_skb(gk->krb5, gk->rx_Kc, &metadata,
				  skb, &offset, &len, &ac);
	kfree(hdr);
	if (ret == -EPROTO) {
		rxrpc_abort_eproto(call, skb, ac,
				   rxgk_abort_1_verify_mic_eproto);
	} else {
		sp->offset = offset;
		sp->len = len;
	}

	rxgk_put(gk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Decrypt an encrypted packet (level 2 security).
 */
static int rxgk_verify_packet_encrypted(struct rxrpc_call *call,
					struct rxgk_context *gk,
					struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxgk_header hdr;
	unsigned int offset = sp->offset, len = sp->len;
	int ret;
	u32 ac;

	_enter("");

	ret = rxgk_decrypt_skb(gk->krb5, gk->rx_enc, skb, &offset, &len, &ac);
	if (ret == -EPROTO)
		rxrpc_abort_eproto(call, skb, ac, rxgk_abort_2_decrypt_eproto);
	if (ret < 0)
		goto error;

	if (len < sizeof(hdr)) {
		ret = rxrpc_abort_eproto(call, skb, RXGK_PACKETSHORT,
					 rxgk_abort_2_short_header);
		goto error;
	}

	/* Extract the header from the skb */
	ret = skb_copy_bits(skb, offset, &hdr, sizeof(hdr));
	if (ret < 0) {
		ret = rxrpc_abort_eproto(call, skb, RXGK_PACKETSHORT,
					 rxgk_abort_2_short_encdata);
		goto error;
	}
	offset += sizeof(hdr);
	len -= sizeof(hdr);

	if (ntohl(hdr.epoch)		!= call->conn->proto.epoch ||
	    ntohl(hdr.cid)		!= call->cid ||
	    ntohl(hdr.call_number)	!= call->call_id ||
	    ntohl(hdr.seq)		!= sp->hdr.seq ||
	    ntohl(hdr.sec_index)	!= call->security_ix ||
	    ntohl(hdr.data_len)		> len) {
		ret = rxrpc_abort_eproto(call, skb, RXGK_SEALEDINCON,
					 rxgk_abort_2_short_data);
		goto error;
	}

	sp->offset = offset;
	sp->len = ntohl(hdr.data_len);
	ret = 0;
error:
	rxgk_put(gk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Verify the security on a received packet or subpacket (if part of a
 * jumbo packet).
 */
static int rxgk_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxgk_context *gk;
	u16 key_number = sp->hdr.cksum;

	_enter("{%d{%x}},{#%u}",
	       call->debug_id, key_serial(call->conn->key), sp->hdr.seq);

	gk = rxgk_get_key(call->conn, &key_number);
	if (IS_ERR(gk)) {
		switch (PTR_ERR(gk)) {
		case -ESTALE:
			return rxrpc_abort_eproto(call, skb, RXGK_BADKEYNO,
						  rxgk_abort_bad_key_number);
		default:
			return PTR_ERR(gk);
		}
	}

	call->security_enctype = gk->krb5->etype;
	switch (call->conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
		rxgk_put(gk);
		return 0;
	case RXRPC_SECURITY_AUTH:
		return rxgk_verify_packet_integrity(call, gk, skb);
	case RXRPC_SECURITY_ENCRYPT:
		return rxgk_verify_packet_encrypted(call, gk, skb);
	default:
		rxgk_put(gk);
		return -ENOANO;
	}
}

/*
 * Allocate memory to hold a challenge or a response packet.  We're not running
 * in the io_thread, so we can't use ->tx_alloc.
 */
static struct page *rxgk_alloc_packet(size_t total_len)
{
	gfp_t gfp = GFP_NOFS;
	int order;

	order = get_order(total_len);
	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Issue a challenge.
 */
static int rxgk_issue_challenge(struct rxrpc_connection *conn)
{
	struct rxrpc_wire_header *whdr;
	struct bio_vec bvec[1];
	struct msghdr msg;
	struct page *page;
	size_t len = sizeof(*whdr) + sizeof(conn->rxgk.nonce);
	u32 serial;
	int ret;

	_enter("{%d}", conn->debug_id);

	get_random_bytes(&conn->rxgk.nonce, sizeof(conn->rxgk.nonce));

	/* We can't use conn->tx_alloc without a lock */
	page = rxgk_alloc_packet(sizeof(*whdr) + sizeof(conn->rxgk.nonce));
	if (!page)
		return -ENOMEM;

	bvec_set_page(&bvec[0], page, len, 0);
	iov_iter_bvec(&msg.msg_iter, WRITE, bvec, 1, len);

	msg.msg_name	= &conn->peer->srx.transport;
	msg.msg_namelen	= conn->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= MSG_SPLICE_PAGES;

	whdr = page_address(page);
	whdr->epoch	= htonl(conn->proto.epoch);
	whdr->cid	= htonl(conn->proto.cid);
	whdr->callNumber = 0;
	whdr->seq	= 0;
	whdr->type	= RXRPC_PACKET_TYPE_CHALLENGE;
	whdr->flags	= conn->out_clientflag;
	whdr->userStatus = 0;
	whdr->securityIndex = conn->security_ix;
	whdr->_rsvd	= 0;
	whdr->serviceId	= htons(conn->service_id);

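	/* The nonce directly follows the wire header in the challenge packet. */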
	memcpy(whdr + 1, conn->rxgk.nonce, sizeof(conn->rxgk.nonce));

	serial = rxrpc_get_next_serials(conn, 1);
	whdr->serial = htonl(serial);

	trace_rxrpc_tx_challenge(conn, serial, 0, *(u32 *)&conn->rxgk.nonce);

	ret = do_udp_sendmsg(conn->local->socket, &msg, len);
	if (ret > 0)
		conn->peer->last_tx_at = ktime_get_seconds();
	__free_page(page);

	if (ret < 0) {
		trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
				    rxrpc_tx_point_rxgk_challenge);
		return -EAGAIN;
	}

	trace_rxrpc_tx_packet(conn->debug_id, whdr,
			      rxrpc_tx_point_rxgk_challenge);
	_leave(" = 0");
	return 0;
}

/*
 * Validate a challenge packet.
 */
static bool rxgk_validate_challenge(struct rxrpc_connection *conn,
				    struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u8 nonce[20];

	if (!conn->key) {
		rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
				 rxgk_abort_chall_no_key);
		return false;
	}

	if (key_validate(conn->key) < 0) {
		rxrpc_abort_conn(conn, skb, RXGK_EXPIRED, -EPROTO,
				 rxgk_abort_chall_key_expired);
		return false;
	}

	if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
			  nonce, sizeof(nonce)) < 0) {
		rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
				 rxgk_abort_chall_short);
		return false;
	}

	trace_rxrpc_rx_challenge(conn, sp->hdr.serial, 0, *(u32 *)nonce, 0);
	return true;
}

/**
 * rxgk_kernel_query_challenge - Query RxGK-specific challenge parameters
 * @challenge: The challenge packet to query
 *
 * Return: The Kerberos 5 encryption type (enctype) for the challenged connection.
 */
u32 rxgk_kernel_query_challenge(struct sk_buff *challenge)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(challenge);

	return sp->chall.conn->rxgk.enctype;
}
EXPORT_SYMBOL(rxgk_kernel_query_challenge);

/*
 * Fill out the control message to pass to userspace to inform about the
 * challenge.
 */
static int rxgk_challenge_to_recvmsg(struct rxrpc_connection *conn,
				     struct sk_buff *challenge,
				     struct msghdr *msg)
{
	struct rxgk_challenge chall;

	chall.base.service_id		= conn->service_id;
	chall.base.security_index	= conn->security_ix;
	chall.enctype			= conn->rxgk.enctype;

	return put_cmsg(msg, SOL_RXRPC, RXRPC_CHALLENGED, sizeof(chall), &chall);
}

/*
 * Insert the requisite amount of XDR padding for the length given.
 */
static int rxgk_pad_out(struct sk_buff *response, size_t len, size_t offset)
{
	__be32 zero = 0;
	size_t pad = xdr_round_up(len) - len;
	int ret;

	if (!pad)
		return 0;

	ret = skb_store_bits(response, offset, &zero, pad);
	if (ret < 0)
		return ret;
	return pad;
}

/*
 * Insert the header into the response.
 */
static noinline ssize_t rxgk_insert_response_header(struct rxrpc_connection *conn,
						    struct rxgk_context *gk,
						    struct sk_buff *response,
						    size_t offset)
{
	struct rxrpc_skb_priv *rsp = rxrpc_skb(response);

	struct {
		struct rxrpc_wire_header whdr;
		__be32 start_time_msw;
		__be32 start_time_lsw;
		__be32 ticket_len;
	} h;
	int ret;

	rsp->resp.kvno		= gk->key_number;
	rsp->resp.version	= gk->krb5->etype;

	h.whdr.epoch		= htonl(conn->proto.epoch);
	h.whdr.cid		= htonl(conn->proto.cid);
	h.whdr.callNumber	= 0;
	h.whdr.serial		= 0;
	h.whdr.seq		= 0;
	h.whdr.type		= RXRPC_PACKET_TYPE_RESPONSE;
	h.whdr.flags		= conn->out_clientflag;
	h.whdr.userStatus	= 0;
	h.whdr.securityIndex	= conn->security_ix;
	h.whdr.cksum		= htons(gk->key_number);
	h.whdr.serviceId	= htons(conn->service_id);
	h.start_time_msw	= htonl(upper_32_bits(conn->rxgk.start_time));
	h.start_time_lsw	= htonl(lower_32_bits(conn->rxgk.start_time));
	h.ticket_len		= htonl(gk->key->ticket.len);

	ret = skb_store_bits(response, offset, &h, sizeof(h));
	return ret < 0 ? ret : sizeof(h);
}

/*
 * Construct the authenticator to go in the response packet
 *
 * struct RXGK_Authenticator {
 *	opaque nonce[20];
 *	opaque appdata<>;
 *	RXGK_Level level;
 *	unsigned int epoch;
 *	unsigned int cid;
 *	unsigned int call_numbers<>;
 * };
 */
static ssize_t rxgk_construct_authenticator(struct rxrpc_connection *conn,
					    struct sk_buff *challenge,
					    const struct krb5_buffer *appdata,
					    struct sk_buff *response,
					    size_t offset)
{
	struct {
		u8	nonce[20];
		__be32	appdata_len;
	} a;
	struct {
		__be32	level;
		__be32	epoch;
		__be32	cid;
		__be32	call_numbers_count;
		__be32	call_numbers[4];
	} b;
	int ret;

	ret = skb_copy_bits(challenge, sizeof(struct rxrpc_wire_header),
			    a.nonce, sizeof(a.nonce));
	if (ret < 0)
		return -EPROTO;

	a.appdata_len = htonl(appdata->len);

	ret = skb_store_bits(response, offset, &a, sizeof(a));
	if (ret < 0)
		return ret;
	offset += sizeof(a);

	if (appdata->len) {
		ret = skb_store_bits(response, offset, appdata->data, appdata->len);
		if (ret < 0)
			return ret;
		offset += appdata->len;

		ret = rxgk_pad_out(response, appdata->len, offset);
		if (ret < 0)
			return ret;
		offset += ret;
	}

	b.level			= htonl(conn->security_level);
	b.epoch			= htonl(conn->proto.epoch);
	b.cid			= htonl(conn->proto.cid);
	b.call_numbers_count	= htonl(4);
	b.call_numbers[0]	= htonl(conn->channels[0].call_counter);
	b.call_numbers[1]	= htonl(conn->channels[1].call_counter);
	b.call_numbers[2]	= htonl(conn->channels[2].call_counter);
	b.call_numbers[3]	= htonl(conn->channels[3].call_counter);

	ret = skb_store_bits(response, offset, &b, sizeof(b));
	if (ret < 0)
		return ret;
	return sizeof(a) + xdr_round_up(appdata->len) + sizeof(b);
}

static ssize_t rxgk_encrypt_authenticator(struct rxrpc_connection *conn,
					  struct rxgk_context *gk,
					  struct sk_buff *response,
					  size_t offset,
					  size_t alloc_len,
					  size_t auth_offset,
					  size_t auth_len)
{
	struct scatterlist sg[16];
	int nr_sg;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nr_sg = skb_to_sgvec(response, sg, offset, alloc_len);
	if (unlikely(nr_sg < 0))
		return nr_sg;
	return crypto_krb5_encrypt(gk->krb5, gk->resp_enc, sg, nr_sg, alloc_len,
				   auth_offset, auth_len, false);
}

/*
 * Construct the response.
 *
 * struct RXGK_Response {
 *	rxgkTime start_time;
 *	RXGK_Data token;
 *	opaque authenticator<RXGK_MAXAUTHENTICATOR>
 * };
 */
static int rxgk_construct_response(struct rxrpc_connection *conn,
				   struct sk_buff *challenge,
				   struct krb5_buffer *appdata)
{
	struct rxrpc_skb_priv *csp, *rsp;
	struct rxgk_context *gk;
	struct sk_buff *response;
	size_t len, auth_len, authx_len, offset, auth_offset, authx_offset;
	__be32 tmp;
	int ret;

	gk = rxgk_get_key(conn, NULL);
	if (IS_ERR(gk))
		return PTR_ERR(gk);

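	/* Size the authenticator: 20-byte nonce, appdata length word plus
	 * data, level/epoch/cid (12 bytes), then a four-entry call_numbers
	 * array preceded by its count ((1 + 4) * 4 bytes).
	 */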
	auth_len = 20 + (4 + appdata->len) + 12 + (1 + 4) * 4;
	authx_len = crypto_krb5_how_much_buffer(gk->krb5, KRB5_ENCRYPT_MODE,
						auth_len, &auth_offset);
	len = sizeof(struct rxrpc_wire_header) +
		8 + (4 + xdr_round_up(gk->key->ticket.len)) + (4 + authx_len);

	response = alloc_skb_with_frags(0, len, 0, &ret, GFP_NOFS);
	if (!response)
		goto error;
	rxrpc_new_skb(response, rxrpc_skb_new_response_rxgk);
	response->len = len;
	response->data_len = len;

	ret = rxgk_insert_response_header(conn, gk, response, 0);
	if (ret < 0)
		goto error;
	offset = ret;

	ret = skb_store_bits(response, offset, gk->key->ticket.data, gk->key->ticket.len);
	if (ret < 0)
		goto error;
	offset += gk->key->ticket.len;
	ret = rxgk_pad_out(response, gk->key->ticket.len, offset);
	if (ret < 0)
		goto error;

	authx_offset = offset + ret + 4; /* Leave a gap for the length. */

	ret = rxgk_construct_authenticator(conn, challenge, appdata, response,
					   authx_offset + auth_offset);
	if (ret < 0)
		goto error;
	auth_len = ret;

	ret = rxgk_encrypt_authenticator(conn, gk, response,
					 authx_offset, authx_len,
					 auth_offset, auth_len);
	if (ret < 0)
		goto error;
	authx_len = ret;

	tmp = htonl(authx_len);
	ret = skb_store_bits(response, authx_offset - 4, &tmp, 4);
	if (ret < 0)
		goto error;

	ret = rxgk_pad_out(response, authx_len, authx_offset + authx_len);
	if (ret < 0)
		goto error;
	len = authx_offset + authx_len + ret;

	if (len != response->len) {
		response->len = len;
		response->data_len = len;
	}

	csp = rxrpc_skb(challenge);
	rsp = rxrpc_skb(response);
	rsp->resp.len = len;
	rsp->resp.challenge_serial = csp->hdr.serial;
	rxrpc_post_response(conn, response);
	response = NULL;
	ret = 0;

error:
	rxrpc_free_skb(response, rxrpc_skb_put_response);
	rxgk_put(gk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Respond to a challenge packet.
 */
static int rxgk_respond_to_challenge(struct rxrpc_connection *conn,
				     struct sk_buff *challenge,
				     struct krb5_buffer *appdata)
{
	_enter("{%d,%x}", conn->debug_id, key_serial(conn->key));

	if (key_validate(conn->key) < 0)
		return rxrpc_abort_conn(conn, NULL, RXGK_EXPIRED, -EPROTO,
					rxgk_abort_chall_key_expired);

	return rxgk_construct_response(conn, challenge, appdata);
}

static int rxgk_respond_to_challenge_no_appdata(struct rxrpc_connection *conn,
						struct sk_buff *challenge)
{
	struct krb5_buffer appdata = {};

	return rxgk_respond_to_challenge(conn, challenge, &appdata);
}

/**
 * rxgk_kernel_respond_to_challenge - Respond to a challenge with appdata
 * @challenge: The challenge to respond to
 * @appdata: The application data to include in the RESPONSE authenticator
 *
 * Allow a kernel application to respond to a CHALLENGE with application data
 * to be included in the RxGK RESPONSE Authenticator.
 *
 * Return: %0 if successful and a negative error code otherwise.
 */
int rxgk_kernel_respond_to_challenge(struct sk_buff *challenge,
				     struct krb5_buffer *appdata)
{
	struct rxrpc_skb_priv *csp = rxrpc_skb(challenge);

	return rxgk_respond_to_challenge(csp->chall.conn, challenge, appdata);
}
EXPORT_SYMBOL(rxgk_kernel_respond_to_challenge);

/*
 * Parse sendmsg() control message and respond to challenge.  We need to see if
 * there's an appdata to fish out.
 */
static int rxgk_sendmsg_respond_to_challenge(struct sk_buff *challenge,
					     struct msghdr *msg)
{
	struct krb5_buffer appdata = {};
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (cmsg->cmsg_level != SOL_RXRPC ||
		    cmsg->cmsg_type != RXRPC_RESP_RXGK_APPDATA)
			continue;
		if (appdata.data)
			return -EINVAL;
		appdata.data = CMSG_DATA(cmsg);
		appdata.len = cmsg->cmsg_len - sizeof(struct cmsghdr);
	}

	return rxgk_kernel_respond_to_challenge(challenge, &appdata);
}

/*
 * Verify the authenticator.
 *
 * struct RXGK_Authenticator {
 *	opaque nonce[20];
 *	opaque appdata<>;
 *	RXGK_Level level;
 *	unsigned int epoch;
 *	unsigned int cid;
 *	unsigned int call_numbers<>;
 * };
 */
static int rxgk_do_verify_authenticator(struct rxrpc_connection *conn,
					const struct krb5_enctype *krb5,
					struct sk_buff *skb,
					__be32 *p, __be32 *end)
{
	u32 app_len, call_count, level, epoch, cid, i;

	_enter("");

	if (memcmp(p, conn->rxgk.nonce, 20) != 0)
		return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
					rxgk_abort_resp_bad_nonce);
	p += 20 / sizeof(__be32);

	app_len	= ntohl(*p++);
	if (app_len > (end - p) * sizeof(__be32))
		return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
					rxgk_abort_resp_short_applen);

	p += xdr_round_up(app_len) / sizeof(__be32);
	if (end - p < 4)
		return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
					rxgk_abort_resp_short_applen);

	level	= ntohl(*p++);
	epoch	= ntohl(*p++);
	cid	= ntohl(*p++);
	call_count = ntohl(*p++);

	if (level	!= conn->security_level ||
	    epoch	!= conn->proto.epoch ||
	    cid		!= conn->proto.cid ||
	    call_count	> 4)
		return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
					rxgk_abort_resp_bad_param);

	if (end - p < call_count)
		return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
					rxgk_abort_resp_short_call_list);

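	/* Bring each channel's call counter up to what the peer asserts,
	 * provided no call is currently active on that channel.
	 */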
	for (i = 0; i < call_count; i++) {
		u32 call_id = ntohl(*p++);

		if (call_id > INT_MAX)
			return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
						rxgk_abort_resp_bad_callid);

		if (call_id < conn->channels[i].call_counter)
			return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
						rxgk_abort_resp_call_ctr);

		if (call_id > conn->channels[i].call_counter) {
			if (conn->channels[i].call)
				return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
							rxgk_abort_resp_call_state);

			conn->channels[i].call_counter = call_id;
		}
	}

	_leave(" = 0");
	return 0;
}

/*
 * Extract the authenticator and verify it.
 */
static int rxgk_verify_authenticator(struct rxrpc_connection *conn,
				     const struct krb5_enctype *krb5,
				     struct sk_buff *skb,
				     unsigned int auth_offset, unsigned int auth_len)
{
	void *auth;
	__be32 *p;
	int ret;

	auth = kmalloc(auth_len, GFP_NOFS);
	if (!auth)
		return -ENOMEM;

	ret = skb_copy_bits(skb, auth_offset, auth, auth_len);
	if (ret < 0) {
		ret = rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
				       rxgk_abort_resp_short_auth);
		goto error;
	}

	p = auth;
	ret = rxgk_do_verify_authenticator(conn, krb5, skb, p, p + auth_len);
error:
	kfree(auth);
	return ret;
}

/*
 * Verify a response.
 *
 * struct RXGK_Response {
 *	rxgkTime	start_time;
 *	RXGK_Data	token;
 *	opaque		authenticator<RXGK_MAXAUTHENTICATOR>
 * };
 */
static int rxgk_verify_response(struct rxrpc_connection *conn,
				struct sk_buff *skb)
{
	const struct krb5_enctype *krb5;
	struct rxrpc_key_token *token;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxgk_response rhdr;
	struct rxgk_context *gk;
	struct key *key = NULL;
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = skb->len - sizeof(struct rxrpc_wire_header);
	unsigned int token_offset, token_len;
	unsigned int auth_offset, auth_len;
	__be32 xauth_len;
	int ret, ec;

	_enter("{%d}", conn->debug_id);

	/* Parse the RXGK_Response object */
	if (sizeof(rhdr) + sizeof(__be32) > len)
		goto short_packet;

	if (skb_copy_bits(skb, offset, &rhdr, sizeof(rhdr)) < 0)
		goto short_packet;
	offset	+= sizeof(rhdr);
	len	-= sizeof(rhdr);

	token_offset	= offset;
	token_len	= ntohl(rhdr.token_len);
	if (xdr_round_up(token_len) + sizeof(__be32) > len)
		goto short_packet;

	trace_rxrpc_rx_response(conn, sp->hdr.serial, 0, sp->hdr.cksum, token_len);

	offset	+= xdr_round_up(token_len);
	len	-= xdr_round_up(token_len);

	if (skb_copy_bits(skb, offset, &xauth_len, sizeof(xauth_len)) < 0)
		goto short_packet;
	offset	+= sizeof(xauth_len);
	len	-= sizeof(xauth_len);

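	/* What remains should be the encrypted authenticator, which must be
	 * XDR-aligned and at least large enough for its fixed-size fields.
	 */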
	auth_offset	= offset;
	auth_len	= ntohl(xauth_len);
	if (auth_len > len)
		goto short_packet;
	if (auth_len & 3)
		goto inconsistent;
	if (auth_len < 20 + 9 * 4)
		goto auth_too_short;

	/* We need to extract and decrypt the token and instantiate a session
	 * key for it.  This bit, however, is application-specific.  If
	 * possible, we use a default parser, but we might end up bumping this
	 * to the app to deal with - which might mean a round trip to
	 * userspace.
	 */
	ret = rxgk_extract_token(conn, skb, token_offset, token_len, &key);
	if (ret < 0)
		goto out;

	/* We now have a key instantiated from the decrypted ticket.  We can
	 * pass this to the application so that they can parse the ticket
	 * content and we can use the session key it contains to derive the
	 * keys we need.
	 *
	 * Note that we have to switch enctype at this point as the enctype of
	 * the ticket doesn't necessarily match that of the transport.
	 */
	token = key->payload.data[0];
	conn->security_level = token->rxgk->level;
	conn->rxgk.start_time = __be64_to_cpu(rhdr.start_time);

	gk = rxgk_generate_transport_key(conn, token->rxgk, sp->hdr.cksum, GFP_NOFS);
	if (IS_ERR(gk)) {
		ret = PTR_ERR(gk);
		goto cant_get_token;
	}

	krb5 = gk->krb5;

	trace_rxrpc_rx_response(conn, sp->hdr.serial, krb5->etype, sp->hdr.cksum, token_len);

	/* Decrypt, parse and verify the authenticator. */
	ret = rxgk_decrypt_skb(krb5, gk->resp_enc, skb,
			       &auth_offset, &auth_len, &ec);
	if (ret < 0) {
		rxrpc_abort_conn(conn, skb, RXGK_SEALEDINCON, ret,
				 rxgk_abort_resp_auth_dec);
		goto out;
	}

	ret = rxgk_verify_authenticator(conn, krb5, skb, auth_offset, auth_len);
	if (ret < 0)
		goto out;

	conn->key = key;
	key = NULL;
	ret = 0;
out:
	key_put(key);
	_leave(" = %d", ret);
	return ret;

inconsistent:
	ret = rxrpc_abort_conn(conn, skb, RXGK_INCONSISTENCY, -EPROTO,
			       rxgk_abort_resp_xdr_align);
	goto out;
auth_too_short:
	ret = rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
			       rxgk_abort_resp_short_auth);
	goto out;
short_packet:
	ret = rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
			       rxgk_abort_resp_short_packet);
	goto out;

cant_get_token:
	switch (ret) {
	case -ENOMEM:
		goto temporary_error;
	case -EINVAL:
		ret = rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EKEYREJECTED,
				       rxgk_abort_resp_internal_error);
		goto out;
	case -ENOPKG:
		ret = rxrpc_abort_conn(conn, skb, KRB5_PROG_KEYTYPE_NOSUPP,
				       -EKEYREJECTED, rxgk_abort_resp_nopkg);
		goto out;
	}

temporary_error:
	/* Ignore the response packet if we got a temporary error such as
	 * ENOMEM.  We just want to send the challenge again.  Note that we
	 * also come out this way if the ticket decryption fails.
	 */
	goto out;
}

/*
 * clear the connection security
 */
static void rxgk_clear(struct rxrpc_connection *conn)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(conn->rxgk.keys); i++)
		rxgk_put(conn->rxgk.keys[i]);
}

/*
 * Initialise the RxGK security service.
 */
static int rxgk_init(void)
{
	return 0;
}

/*
 * Clean up the RxGK security service.
 */
static void rxgk_exit(void)
{
}

/*
 * RxRPC YFS GSSAPI-based security
 */
const struct rxrpc_security rxgk_yfs = {
	.name				= "yfs-rxgk",
	.security_index			= RXRPC_SECURITY_YFS_RXGK,
	.no_key_abort			= RXGK_NOTAUTH,
	.init				= rxgk_init,
	.exit				= rxgk_exit,
	.preparse_server_key		= rxgk_preparse_server_key,
	.free_preparse_server_key	= rxgk_free_preparse_server_key,
	.destroy_server_key		= rxgk_destroy_server_key,
	.describe_server_key		= rxgk_describe_server_key,
	.init_connection_security	= rxgk_init_connection_security,
	.alloc_txbuf			= rxgk_alloc_txbuf,
	.secure_packet			= rxgk_secure_packet,
	.verify_packet			= rxgk_verify_packet,
	.free_call_crypto		= rxgk_free_call_crypto,
	.issue_challenge		= rxgk_issue_challenge,
	.validate_challenge		= rxgk_validate_challenge,
	.challenge_to_recvmsg		= rxgk_challenge_to_recvmsg,
	.sendmsg_respond_to_challenge	= rxgk_sendmsg_respond_to_challenge,
	.respond_to_challenge		= rxgk_respond_to_challenge_no_appdata,
	.verify_response		= rxgk_verify_response,
	.clear				= rxgk_clear,
	.default_decode_ticket		= rxgk_yfs_decode_ticket,
};