// SPDX-License-Identifier: GPL-2.0-or-later
/* GSSAPI-based RxRPC security
 *
 * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/key-type.h>
#include "ar-internal.h"
#include "rxgk_common.h"

/*
 * Parse the information from a server key
 */
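/* The key description is expected to take the form
 * "<service>:<sec_class>:<kvno>:<enctype>", matching the sscanf() format
 * below; the enctype must name a Kerberos 5 encryption type supported by the
 * crypto_krb5 library.
 */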
static int rxgk_preparse_server_key(struct key_preparsed_payload *prep)
{
	const struct krb5_enctype *krb5;
	struct krb5_buffer *server_key = (void *)&prep->payload.data[2];
	unsigned int service, sec_class, kvno, enctype;
	int n = 0;

	_enter("%zu", prep->datalen);

	if (sscanf(prep->orig_description, "%u:%u:%u:%u%n",
		   &service, &sec_class, &kvno, &enctype, &n) != 4)
		return -EINVAL;

	if (prep->orig_description[n])
		return -EINVAL;

	krb5 = crypto_krb5_find_enctype(enctype);
	if (!krb5)
		return -ENOPKG;

	prep->payload.data[0] = (struct krb5_enctype *)krb5;

	if (prep->datalen != krb5->key_len)
		return -EKEYREJECTED;

	server_key->len = prep->datalen;
	server_key->data = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
	if (!server_key->data)
		return -ENOMEM;

	_leave(" = 0");
	return 0;
}

static void rxgk_free_server_key(union key_payload *payload)
{
	struct krb5_buffer *server_key = (void *)&payload->data[2];

	kfree_sensitive(server_key->data);
}

static void rxgk_free_preparse_server_key(struct key_preparsed_payload *prep)
{
	rxgk_free_server_key(&prep->payload);
}

static void rxgk_destroy_server_key(struct key *key)
{
	rxgk_free_server_key(&key->payload);
}

static void rxgk_describe_server_key(const struct key *key, struct seq_file *m)
{
	const struct krb5_enctype *krb5 = key->payload.data[0];

	if (krb5)
		seq_printf(m, ": %s", krb5->name);
}

/*
 * Handle rekeying the connection when we see our limits overrun or when the
 * far side decided to rekey.
 *
 * Returns a ref on the context if successful or -ESTALE if the key is out of
 * date.
 */
static struct rxgk_context *rxgk_rekey(struct rxrpc_connection *conn,
				       const u16 *specific_key_number)
{
	struct rxgk_context *gk, *dead = NULL;
	unsigned int key_number, current_key, mask = ARRAY_SIZE(conn->rxgk.keys) - 1;
	bool crank = false;

	_enter("%d", specific_key_number ? *specific_key_number : -1);

	mutex_lock(&conn->security_lock);

	current_key = conn->rxgk.key_number;
	if (!specific_key_number) {
		key_number = current_key;
	} else {
		if (*specific_key_number == (u16)current_key)
			key_number = current_key;
		else if (*specific_key_number == (u16)(current_key - 1))
			key_number = current_key - 1;
		else if (*specific_key_number == (u16)(current_key + 1))
			goto crank_window;
		else
			goto bad_key;
	}

	gk = conn->rxgk.keys[key_number & mask];
	if (!gk)
		goto generate_key;
	if (!specific_key_number &&
	    test_bit(RXGK_TK_NEEDS_REKEY, &gk->flags))
		goto crank_window;

grab:
	refcount_inc(&gk->usage);
	mutex_unlock(&conn->security_lock);
	rxgk_put(dead);
	return gk;

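	/* Advance the key window.  Only the bottom 16 bits of the key number
	 * appear in the packet header, but the full 32-bit value is used to
	 * derive the transport key; stepping to key N+1 retires the key two
	 * generations behind the new current key once it is installed.
	 */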
crank_window:
	trace_rxrpc_rxgk_rekey(conn, current_key,
			       specific_key_number ? *specific_key_number : -1);
	if (current_key == UINT_MAX)
		goto bad_key;
	if (current_key + 1 == UINT_MAX)
		set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);

	key_number = current_key + 1;
	if (WARN_ON(conn->rxgk.keys[key_number & mask]))
		goto bad_key;
	crank = true;

generate_key:
	gk = conn->rxgk.keys[current_key & mask];
	gk = rxgk_generate_transport_key(conn, gk->key, key_number, GFP_NOFS);
	if (IS_ERR(gk)) {
		mutex_unlock(&conn->security_lock);
		return gk;
	}

	write_lock(&conn->security_use_lock);
	if (crank) {
		current_key++;
		conn->rxgk.key_number = current_key;
		dead = conn->rxgk.keys[(current_key - 2) & mask];
		conn->rxgk.keys[(current_key - 2) & mask] = NULL;
	}
	conn->rxgk.keys[current_key & mask] = gk;
	write_unlock(&conn->security_use_lock);
	goto grab;

bad_key:
	mutex_unlock(&conn->security_lock);
	return ERR_PTR(-ESTALE);
}

/*
 * Get the specified keying context.
 *
 * Returns a ref on the context if successful or -ESTALE if the key is out of
 * date.
 */
static struct rxgk_context *rxgk_get_key(struct rxrpc_connection *conn,
					 const u16 *specific_key_number)
{
	struct rxgk_context *gk;
	unsigned int key_number, current_key, mask = ARRAY_SIZE(conn->rxgk.keys) - 1;

	_enter("{%u},%d",
	       conn->rxgk.key_number, specific_key_number ? *specific_key_number : -1);

	read_lock(&conn->security_use_lock);

	current_key = conn->rxgk.key_number;
	if (!specific_key_number) {
		key_number = current_key;
	} else {
		/* Only the bottom 16 bits of the key number are exposed in the
		 * header, so we try and keep the upper 16 bits in step. The
		 * whole 32 bits are used to generate the TK.
		 */
		if (*specific_key_number == (u16)current_key)
			key_number = current_key;
		else if (*specific_key_number == (u16)(current_key - 1))
			key_number = current_key - 1;
		else if (*specific_key_number == (u16)(current_key + 1))
			goto rekey;
		else
			goto bad_key;
	}

	gk = conn->rxgk.keys[key_number & mask];
	if (!gk)
		goto slow_path;
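	/* Check whether the current key has exhausted its lifetime or its
	 * remaining byte count and so needs to be superseded before use.
	 */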
	if (!specific_key_number &&
	    key_number < UINT_MAX) {
		if (time_after(jiffies, gk->expiry) ||
		    gk->bytes_remaining < 0) {
			set_bit(RXGK_TK_NEEDS_REKEY, &gk->flags);
			goto slow_path;
		}

		if (test_bit(RXGK_TK_NEEDS_REKEY, &gk->flags))
			goto slow_path;
	}

	refcount_inc(&gk->usage);
	read_unlock(&conn->security_use_lock);
	return gk;

rekey:
	_debug("rekey");
	if (current_key == UINT_MAX)
		goto bad_key;
	gk = conn->rxgk.keys[current_key & mask];
	if (gk)
		set_bit(RXGK_TK_NEEDS_REKEY, &gk->flags);
slow_path:
	read_unlock(&conn->security_use_lock);
	return rxgk_rekey(conn, specific_key_number);
bad_key:
	read_unlock(&conn->security_use_lock);
	return ERR_PTR(-ESTALE);
}

/*
 * initialise connection security
 */
static int rxgk_init_connection_security(struct rxrpc_connection *conn,
					 struct rxrpc_key_token *token)
{
	struct rxgk_context *gk;
	int ret;

	_enter("{%d,%u},{%x}",
	       conn->debug_id, conn->rxgk.key_number, key_serial(conn->key));

	conn->security_ix = token->security_index;
	conn->security_level = token->rxgk->level;

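	/* Record the connection start time in units of 100ns (ktime_get()
	 * returns nanoseconds); this is the value later placed in the
	 * RESPONSE header as the start_time.
	 */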
	if (rxrpc_conn_is_client(conn)) {
		conn->rxgk.start_time = ktime_get();
		do_div(conn->rxgk.start_time, 100);
	}

	gk = rxgk_generate_transport_key(conn, token->rxgk, conn->rxgk.key_number,
					 GFP_NOFS);
	if (IS_ERR(gk))
		return PTR_ERR(gk);
	conn->rxgk.enctype = gk->krb5->etype;
	conn->rxgk.keys[gk->key_number & 3] = gk;

	switch (conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
	case RXRPC_SECURITY_AUTH:
	case RXRPC_SECURITY_ENCRYPT:
		break;
	default:
		ret = -EKEYREJECTED;
		goto error;
	}

	ret = 0;
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * Clean up the crypto on a call.
 */
static void rxgk_free_call_crypto(struct rxrpc_call *call)
{
}

/*
 * Work out how much data we can put in a packet.
 */
static struct rxrpc_txbuf *rxgk_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
	enum krb5_crypto_mode mode;
	struct rxgk_context *gk;
	struct rxrpc_txbuf *txb;
	size_t shdr, alloc, limit, part, offset, gap;

	switch (call->conn->security_level) {
	default:
		alloc = umin(remain, RXRPC_JUMBO_DATALEN);
		return rxrpc_alloc_data_txbuf(call, alloc, 1, gfp);
	case RXRPC_SECURITY_AUTH:
		shdr = 0;
		mode = KRB5_CHECKSUM_MODE;
		break;
	case RXRPC_SECURITY_ENCRYPT:
		shdr = sizeof(struct rxgk_header);
		mode = KRB5_ENCRYPT_MODE;
		break;
	}

	gk = rxgk_get_key(call->conn, NULL);
	if (IS_ERR(gk))
		return NULL;

	/* Work out the maximum amount of data that will fit. */
	alloc = RXRPC_JUMBO_DATALEN;
	limit = crypto_krb5_how_much_data(gk->krb5, mode, &alloc, &offset);

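	/* If what remains to be sent fits with room to spare, size the buffer
	 * to suit; otherwise use a full jumbo-sized buffer and note the gap
	 * left over so that it can be cleared.
	 */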
	if (remain < limit - shdr) {
		part = remain;
		alloc = crypto_krb5_how_much_buffer(gk->krb5, mode,
						    shdr + part, &offset);
		gap = 0;
	} else {
		part = limit - shdr;
		gap = RXRPC_JUMBO_DATALEN - alloc;
		alloc = RXRPC_JUMBO_DATALEN;
	}

	rxgk_put(gk);

	txb = rxrpc_alloc_data_txbuf(call, alloc, 16, gfp);
	if (!txb)
		return NULL;

	txb->crypto_header = offset;
	txb->sec_header = shdr;
	txb->offset += offset + shdr;
	txb->space = part;

	/* Clear excess space in the packet */
	if (gap)
		memset(txb->data + alloc - gap, 0, gap);
	return txb;
}

/*
 * Integrity mode (sign a packet - level 1 security)
 */
static int rxgk_secure_packet_integrity(const struct rxrpc_call *call,
					struct rxgk_context *gk,
					struct rxrpc_txbuf *txb)
{
	struct rxgk_header *hdr;
	struct scatterlist sg[1];
	struct krb5_buffer metadata;
	int ret = -ENOMEM;

	_enter("");

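	/* The pseudo-header is only fed into the MIC calculation as associated
	 * metadata; it is not itself transmitted in the packet.
	 */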
	hdr = kzalloc(sizeof(*hdr), GFP_NOFS);
	if (!hdr)
		goto error_gk;

	hdr->epoch = htonl(call->conn->proto.epoch);
	hdr->cid = htonl(call->cid);
	hdr->call_number = htonl(call->call_id);
	hdr->seq = htonl(txb->seq);
	hdr->sec_index = htonl(call->security_ix);
	hdr->data_len = htonl(txb->len);
	metadata.len = sizeof(*hdr);
	metadata.data = hdr;

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], txb->data, txb->alloc_size);

	ret = crypto_krb5_get_mic(gk->krb5, gk->tx_Kc, &metadata,
				  sg, 1, txb->alloc_size,
				  txb->crypto_header, txb->sec_header + txb->len);
	if (ret >= 0) {
		txb->pkt_len = ret;
		if (txb->alloc_size == RXRPC_JUMBO_DATALEN)
			txb->jumboable = true;
		gk->bytes_remaining -= ret;
	}
	kfree(hdr);
error_gk:
	rxgk_put(gk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * wholly encrypt a packet (level 2 security)
 */
static int rxgk_secure_packet_encrypted(const struct rxrpc_call *call,
					struct rxgk_context *gk,
					struct rxrpc_txbuf *txb)
{
	struct rxgk_header *hdr;
	struct scatterlist sg[1];
	int ret;

	_enter("%x", txb->len);

	/* Insert the header into the buffer. */
	hdr = txb->data + txb->crypto_header;
	hdr->epoch = htonl(call->conn->proto.epoch);
	hdr->cid = htonl(call->cid);
	hdr->call_number = htonl(call->call_id);
	hdr->seq = htonl(txb->seq);
	hdr->sec_index = htonl(call->security_ix);
	hdr->data_len = htonl(txb->len);

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], txb->data, txb->alloc_size);

	ret = crypto_krb5_encrypt(gk->krb5, gk->tx_enc,
				  sg, 1, txb->alloc_size,
				  txb->crypto_header, txb->sec_header + txb->len,
				  false);
	if (ret >= 0) {
		txb->pkt_len = ret;
		if (txb->alloc_size == RXRPC_JUMBO_DATALEN)
			txb->jumboable = true;
		gk->bytes_remaining -= ret;
	}

	rxgk_put(gk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Secure a packet for transmission (checksum or encrypt it according to the
 * connection's security level).
 */
static int rxgk_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	struct rxgk_context *gk;
	int ret;

	_enter("{%d{%x}},{#%u},%u,",
	       call->debug_id, key_serial(call->conn->key), txb->seq, txb->len);

	gk = rxgk_get_key(call->conn, NULL);
	if (IS_ERR(gk))
		return PTR_ERR(gk) == -ESTALE ? -EKEYREJECTED : PTR_ERR(gk);

	ret = key_validate(call->conn->key);
	if (ret < 0) {
		rxgk_put(gk);
		return ret;
	}

	call->security_enctype = gk->krb5->etype;
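	/* The low 16 bits of the transport key number travel in the wire
	 * header checksum field so that the receiver can select the matching
	 * key on its side.
	 */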
	txb->cksum = htons(gk->key_number);

	switch (call->conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
		rxgk_put(gk);
		txb->pkt_len = txb->len;
		return 0;
	case RXRPC_SECURITY_AUTH:
		return rxgk_secure_packet_integrity(call, gk, txb);
	case RXRPC_SECURITY_ENCRYPT:
		return rxgk_secure_packet_encrypted(call, gk, txb);
	default:
		rxgk_put(gk);
		return -EPERM;
	}
}

/*
 * Integrity mode (check the signature on a packet - level 1 security)
 */
static int rxgk_verify_packet_integrity(struct rxrpc_call *call,
					struct rxgk_context *gk,
					struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxgk_header *hdr;
	struct krb5_buffer metadata;
	unsigned int offset = sp->offset, len = sp->len;
	size_t data_offset = 0, data_len = len;
	u32 ac = 0;
	int ret = -ENOMEM;

	_enter("");

	crypto_krb5_where_is_the_data(gk->krb5, KRB5_CHECKSUM_MODE,
				      &data_offset, &data_len);

	hdr = kzalloc(sizeof(*hdr), GFP_NOFS);
	if (!hdr)
		goto put_gk;

	hdr->epoch = htonl(call->conn->proto.epoch);
	hdr->cid = htonl(call->cid);
	hdr->call_number = htonl(call->call_id);
	hdr->seq = htonl(sp->hdr.seq);
	hdr->sec_index = htonl(call->security_ix);
	hdr->data_len = htonl(data_len);

	metadata.len = sizeof(*hdr);
	metadata.data = hdr;
	ret = rxgk_verify_mic_skb(gk->krb5, gk->rx_Kc, &metadata,
				  skb, &offset, &len, &ac);
	kfree(hdr);
	if (ret < 0) {
		if (ret != -ENOMEM)
			rxrpc_abort_eproto(call, skb, ac,
					   rxgk_abort_1_verify_mic_eproto);
	} else {
		sp->offset = offset;
		sp->len = len;
	}

put_gk:
	rxgk_put(gk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Decrypt an encrypted packet (level 2 security).
 */
static int rxgk_verify_packet_encrypted(struct rxrpc_call *call,
					struct rxgk_context *gk,
					struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxgk_header hdr;
	unsigned int offset = sp->offset, len = sp->len;
	int ret;
	u32 ac = 0;

	_enter("");

	ret = rxgk_decrypt_skb(gk->krb5, gk->rx_enc, skb, &offset, &len, &ac);
	if (ret < 0) {
		if (ret != -ENOMEM)
			rxrpc_abort_eproto(call, skb, ac, rxgk_abort_2_decrypt_eproto);
		goto error;
	}

	if (len < sizeof(hdr)) {
		ret = rxrpc_abort_eproto(call, skb, RXGK_PACKETSHORT,
					 rxgk_abort_2_short_header);
		goto error;
	}

	/* Extract the header from the skb */
	ret = skb_copy_bits(skb, offset, &hdr, sizeof(hdr));
	if (ret < 0) {
		ret = rxrpc_abort_eproto(call, skb, RXGK_PACKETSHORT,
					 rxgk_abort_2_short_encdata);
		goto error;
	}
	offset += sizeof(hdr);
	len -= sizeof(hdr);

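	/* The decrypted header must match the packet it arrived in, otherwise
	 * the payload may have been spliced in from elsewhere.
	 */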
	if (ntohl(hdr.epoch) != call->conn->proto.epoch ||
	    ntohl(hdr.cid) != call->cid ||
	    ntohl(hdr.call_number) != call->call_id ||
	    ntohl(hdr.seq) != sp->hdr.seq ||
	    ntohl(hdr.sec_index) != call->security_ix ||
	    ntohl(hdr.data_len) > len) {
		ret = rxrpc_abort_eproto(call, skb, RXGK_SEALEDINCON,
					 rxgk_abort_2_short_data);
		goto error;
	}

	sp->offset = offset;
	sp->len = ntohl(hdr.data_len);
	ret = 0;
error:
	rxgk_put(gk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Verify the security on a received packet or subpacket (if part of a
 * jumbo packet).
 */
static int rxgk_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxgk_context *gk;
	u16 key_number = sp->hdr.cksum;

	_enter("{%d{%x}},{#%u}",
	       call->debug_id, key_serial(call->conn->key), sp->hdr.seq);

	gk = rxgk_get_key(call->conn, &key_number);
	if (IS_ERR(gk)) {
		switch (PTR_ERR(gk)) {
		case -ESTALE:
			return rxrpc_abort_eproto(call, skb, RXGK_BADKEYNO,
						  rxgk_abort_bad_key_number);
		default:
			return PTR_ERR(gk);
		}
	}

	call->security_enctype = gk->krb5->etype;
	switch (call->conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
		rxgk_put(gk);
		return 0;
	case RXRPC_SECURITY_AUTH:
		return rxgk_verify_packet_integrity(call, gk, skb);
	case RXRPC_SECURITY_ENCRYPT:
		return rxgk_verify_packet_encrypted(call, gk, skb);
	default:
		rxgk_put(gk);
		return -ENOANO;
	}
}

/*
 * Allocate memory to hold a challenge or a response packet. We're not running
 * in the io_thread, so we can't use ->tx_alloc.
 */
static struct page *rxgk_alloc_packet(size_t total_len)
{
	gfp_t gfp = GFP_NOFS;
	int order;

	order = get_order(total_len);
	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Issue a challenge.
 */
static int rxgk_issue_challenge(struct rxrpc_connection *conn)
{
	struct rxrpc_wire_header *whdr;
	struct bio_vec bvec[1];
	struct msghdr msg;
	struct page *page;
	size_t len = sizeof(*whdr) + sizeof(conn->rxgk.nonce);
	u32 serial;
	int ret;

	_enter("{%d}", conn->debug_id);

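	/* The CHALLENGE packet is just a wire header followed by the random
	 * nonce; the responder must echo the nonce back inside its encrypted
	 * authenticator.
	 */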
	get_random_bytes(&conn->rxgk.nonce, sizeof(conn->rxgk.nonce));

	/* We can't use conn->tx_alloc without a lock */
	page = rxgk_alloc_packet(sizeof(*whdr) + sizeof(conn->rxgk.nonce));
	if (!page)
		return -ENOMEM;

	bvec_set_page(&bvec[0], page, len, 0);
	iov_iter_bvec(&msg.msg_iter, WRITE, bvec, 1, len);

	msg.msg_name = &conn->peer->srx.transport;
	msg.msg_namelen = conn->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = MSG_SPLICE_PAGES;

	whdr = page_address(page);
	whdr->epoch = htonl(conn->proto.epoch);
	whdr->cid = htonl(conn->proto.cid);
	whdr->callNumber = 0;
	whdr->seq = 0;
	whdr->type = RXRPC_PACKET_TYPE_CHALLENGE;
	whdr->flags = conn->out_clientflag;
	whdr->userStatus = 0;
	whdr->securityIndex = conn->security_ix;
	whdr->_rsvd = 0;
	whdr->serviceId = htons(conn->service_id);

	memcpy(whdr + 1, conn->rxgk.nonce, sizeof(conn->rxgk.nonce));

	serial = rxrpc_get_next_serials(conn, 1);
	whdr->serial = htonl(serial);

	trace_rxrpc_tx_challenge(conn, serial, 0, *(u32 *)&conn->rxgk.nonce);

	ret = do_udp_sendmsg(conn->local->socket, &msg, len);
	if (ret > 0)
		conn->peer->last_tx_at = ktime_get_seconds();
	__free_page(page);

	if (ret < 0) {
		trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
				    rxrpc_tx_point_rxgk_challenge);
		return -EAGAIN;
	}

	trace_rxrpc_tx_packet(conn->debug_id, whdr,
			      rxrpc_tx_point_rxgk_challenge);
	_leave(" = 0");
	return 0;
}

/*
 * Validate a challenge packet.
 */
static bool rxgk_validate_challenge(struct rxrpc_connection *conn,
				    struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u8 nonce[20];

	if (!conn->key) {
		rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
				 rxgk_abort_chall_no_key);
		return false;
	}

	if (key_validate(conn->key) < 0) {
		rxrpc_abort_conn(conn, skb, RXGK_EXPIRED, -EPROTO,
				 rxgk_abort_chall_key_expired);
		return false;
	}

	if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
			  nonce, sizeof(nonce)) < 0) {
		rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
				 rxgk_abort_chall_short);
		return false;
	}

	trace_rxrpc_rx_challenge(conn, sp->hdr.serial, 0, *(u32 *)nonce, 0);
	return true;
}

/**
 * rxgk_kernel_query_challenge - Query RxGK-specific challenge parameters
 * @challenge: The challenge packet to query
 *
 * Return: The Kerberos 5 encryption type for the challenged connection.
 */
u32 rxgk_kernel_query_challenge(struct sk_buff *challenge)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(challenge);

	return sp->chall.conn->rxgk.enctype;
}
EXPORT_SYMBOL(rxgk_kernel_query_challenge);

/*
 * Fill out the control message passed to userspace to inform it of the
 * challenge.
 */
static int rxgk_challenge_to_recvmsg(struct rxrpc_connection *conn,
				     struct sk_buff *challenge,
				     struct msghdr *msg)
{
	struct rxgk_challenge chall;

	chall.base.service_id = conn->service_id;
	chall.base.security_index = conn->security_ix;
	chall.enctype = conn->rxgk.enctype;

	return put_cmsg(msg, SOL_RXRPC, RXRPC_CHALLENGED, sizeof(chall), &chall);
}

/*
 * Insert the requisite amount of XDR padding for the length given.
 */
static int rxgk_pad_out(struct sk_buff *response, size_t len, size_t offset)
{
	__be32 zero = 0;
	size_t pad = xdr_round_up(len) - len;
	int ret;

	if (!pad)
		return 0;

	ret = skb_store_bits(response, offset, &zero, pad);
	if (ret < 0)
		return ret;
	return pad;
}

/*
 * Insert the header into the response.
 */
static noinline ssize_t rxgk_insert_response_header(struct rxrpc_connection *conn,
						    struct rxgk_context *gk,
						    struct sk_buff *response,
						    size_t offset)
{
	struct rxrpc_skb_priv *rsp = rxrpc_skb(response);

	struct {
		struct rxrpc_wire_header whdr;
		__be32 start_time_msw;
		__be32 start_time_lsw;
		__be32 ticket_len;
	} h;
	int ret;

	rsp->resp.kvno = gk->key_number;
	rsp->resp.version = gk->krb5->etype;

	h.whdr.epoch = htonl(conn->proto.epoch);
	h.whdr.cid = htonl(conn->proto.cid);
	h.whdr.callNumber = 0;
	h.whdr.serial = 0;
	h.whdr.seq = 0;
	h.whdr.type = RXRPC_PACKET_TYPE_RESPONSE;
	h.whdr.flags = conn->out_clientflag;
	h.whdr.userStatus = 0;
	h.whdr.securityIndex = conn->security_ix;
	h.whdr.cksum = htons(gk->key_number);
	h.whdr.serviceId = htons(conn->service_id);
	h.start_time_msw = htonl(upper_32_bits(conn->rxgk.start_time));
	h.start_time_lsw = htonl(lower_32_bits(conn->rxgk.start_time));
	h.ticket_len = htonl(gk->key->ticket.len);

	ret = skb_store_bits(response, offset, &h, sizeof(h));
	return ret < 0 ? ret : sizeof(h);
}

/*
 * Construct the authenticator to go in the response packet
 *
 * struct RXGK_Authenticator {
 *	opaque nonce[20];
 *	opaque appdata<>;
 *	RXGK_Level level;
 *	unsigned int epoch;
 *	unsigned int cid;
 *	unsigned int call_numbers<>;
 * };
 */
static ssize_t rxgk_construct_authenticator(struct rxrpc_connection *conn,
					    struct sk_buff *challenge,
					    const struct krb5_buffer *appdata,
					    struct sk_buff *response,
					    size_t offset)
{
	struct {
		u8 nonce[20];
		__be32 appdata_len;
	} a;
	struct {
		__be32 level;
		__be32 epoch;
		__be32 cid;
		__be32 call_numbers_count;
		__be32 call_numbers[4];
	} b;
	int ret;

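	/* The authenticator is XDR-encoded: a fixed 20-byte nonce, then the
	 * variable-length appdata (padded to a multiple of four), then the
	 * fixed-size tail, so it is stored in two chunks either side of the
	 * appdata.
	 */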
	ret = skb_copy_bits(challenge, sizeof(struct rxrpc_wire_header),
			    a.nonce, sizeof(a.nonce));
	if (ret < 0)
		return -EPROTO;

	a.appdata_len = htonl(appdata->len);

	ret = skb_store_bits(response, offset, &a, sizeof(a));
	if (ret < 0)
		return ret;
	offset += sizeof(a);

	if (appdata->len) {
		ret = skb_store_bits(response, offset, appdata->data, appdata->len);
		if (ret < 0)
			return ret;
		offset += appdata->len;

		ret = rxgk_pad_out(response, appdata->len, offset);
		if (ret < 0)
			return ret;
		offset += ret;
	}

	b.level = htonl(conn->security_level);
	b.epoch = htonl(conn->proto.epoch);
	b.cid = htonl(conn->proto.cid);
	b.call_numbers_count = htonl(4);
	b.call_numbers[0] = htonl(conn->channels[0].call_counter);
	b.call_numbers[1] = htonl(conn->channels[1].call_counter);
	b.call_numbers[2] = htonl(conn->channels[2].call_counter);
	b.call_numbers[3] = htonl(conn->channels[3].call_counter);

	ret = skb_store_bits(response, offset, &b, sizeof(b));
	if (ret < 0)
		return ret;
	return sizeof(a) + xdr_round_up(appdata->len) + sizeof(b);
}

static ssize_t rxgk_encrypt_authenticator(struct rxrpc_connection *conn,
					  struct rxgk_context *gk,
					  struct sk_buff *response,
					  size_t offset,
					  size_t alloc_len,
					  size_t auth_offset,
					  size_t auth_len)
{
	struct scatterlist sg[16];
	int nr_sg;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nr_sg = skb_to_sgvec(response, sg, offset, alloc_len);
	if (unlikely(nr_sg < 0))
		return nr_sg;
	return crypto_krb5_encrypt(gk->krb5, gk->resp_enc, sg, nr_sg, alloc_len,
				   auth_offset, auth_len, false);
}

/*
 * Construct the response.
 *
 * struct RXGK_Response {
 *	rxgkTime start_time;
 *	RXGK_Data token;
 *	opaque authenticator<RXGK_MAXAUTHENTICATOR>
 * };
 */
static int rxgk_construct_response(struct rxrpc_connection *conn,
				   struct sk_buff *challenge,
				   struct krb5_buffer *appdata)
{
	struct rxrpc_skb_priv *csp, *rsp;
	struct rxgk_context *gk;
	struct sk_buff *response;
	size_t len, auth_len, authx_len, offset, auth_offset, authx_offset;
	__be32 tmp;
	int ret;

	gk = rxgk_get_key(conn, NULL);
	if (IS_ERR(gk))
		return PTR_ERR(gk);

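	/* Size the authenticator: 20-byte nonce, XDR'd appdata (length word
	 * plus data), level/epoch/cid, and a four-entry call number array with
	 * its count word.
	 */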
	auth_len = 20 + (4 + appdata->len) + 12 + (1 + 4) * 4;
	authx_len = crypto_krb5_how_much_buffer(gk->krb5, KRB5_ENCRYPT_MODE,
						auth_len, &auth_offset);
	len = sizeof(struct rxrpc_wire_header) +
		8 + (4 + xdr_round_up(gk->key->ticket.len)) + (4 + authx_len);

	response = alloc_skb_with_frags(0, len, 0, &ret, GFP_NOFS);
	if (!response)
		goto error;
	rxrpc_new_skb(response, rxrpc_skb_new_response_rxgk);
	response->len = len;
	response->data_len = len;

	ret = rxgk_insert_response_header(conn, gk, response, 0);
	if (ret < 0)
		goto error;
	offset = ret;

	ret = skb_store_bits(response, offset, gk->key->ticket.data, gk->key->ticket.len);
	if (ret < 0)
		goto error;
	offset += gk->key->ticket.len;
	ret = rxgk_pad_out(response, gk->key->ticket.len, offset);
	if (ret < 0)
		goto error;

	authx_offset = offset + ret + 4; /* Leave a gap for the length. */

	ret = rxgk_construct_authenticator(conn, challenge, appdata, response,
					   authx_offset + auth_offset);
	if (ret < 0)
		goto error;
	auth_len = ret;

	ret = rxgk_encrypt_authenticator(conn, gk, response,
					 authx_offset, authx_len,
					 auth_offset, auth_len);
	if (ret < 0)
		goto error;
	authx_len = ret;

	tmp = htonl(authx_len);
	ret = skb_store_bits(response, authx_offset - 4, &tmp, 4);
	if (ret < 0)
		goto error;

	ret = rxgk_pad_out(response, authx_len, authx_offset + authx_len);
	if (ret < 0)
		goto error;
	len = authx_offset + authx_len + ret;

	if (len != response->len) {
		response->len = len;
		response->data_len = len;
	}

	csp = rxrpc_skb(challenge);
	rsp = rxrpc_skb(response);
	rsp->resp.len = len;
	rsp->resp.challenge_serial = csp->hdr.serial;
	rxrpc_post_response(conn, response);
	response = NULL;
	ret = 0;

error:
	rxrpc_free_skb(response, rxrpc_skb_put_response);
	rxgk_put(gk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Respond to a challenge packet.
 */
static int rxgk_respond_to_challenge(struct rxrpc_connection *conn,
				     struct sk_buff *challenge,
				     struct krb5_buffer *appdata)
{
	_enter("{%d,%x}", conn->debug_id, key_serial(conn->key));

	if (key_validate(conn->key) < 0)
		return rxrpc_abort_conn(conn, NULL, RXGK_EXPIRED, -EPROTO,
					rxgk_abort_chall_key_expired);

	return rxgk_construct_response(conn, challenge, appdata);
}

static int rxgk_respond_to_challenge_no_appdata(struct rxrpc_connection *conn,
						struct sk_buff *challenge)
{
	struct krb5_buffer appdata = {};

	return rxgk_respond_to_challenge(conn, challenge, &appdata);
}

/**
 * rxgk_kernel_respond_to_challenge - Respond to a challenge with appdata
 * @challenge: The challenge to respond to
 * @appdata: The application data to include in the RESPONSE authenticator
 *
 * Allow a kernel application to respond to a CHALLENGE with application data
 * to be included in the RxGK RESPONSE Authenticator.
 *
 * Return: %0 if successful and a negative error code otherwise.
 */
int rxgk_kernel_respond_to_challenge(struct sk_buff *challenge,
				     struct krb5_buffer *appdata)
{
	struct rxrpc_skb_priv *csp = rxrpc_skb(challenge);

	return rxgk_respond_to_challenge(csp->chall.conn, challenge, appdata);
}
EXPORT_SYMBOL(rxgk_kernel_respond_to_challenge);

/*
 * Parse sendmsg() control message and respond to challenge. We need to see if
 * there's an appdata to fish out.
 */
static int rxgk_sendmsg_respond_to_challenge(struct sk_buff *challenge,
					     struct msghdr *msg)
{
	struct krb5_buffer appdata = {};
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (cmsg->cmsg_level != SOL_RXRPC ||
		    cmsg->cmsg_type != RXRPC_RESP_RXGK_APPDATA)
			continue;
		if (appdata.data)
			return -EINVAL;
		appdata.data = CMSG_DATA(cmsg);
		appdata.len = cmsg->cmsg_len - sizeof(struct cmsghdr);
	}

	return rxgk_kernel_respond_to_challenge(challenge, &appdata);
}

/*
 * Verify the authenticator.
 *
 * struct RXGK_Authenticator {
 *	opaque nonce[20];
 *	opaque appdata<>;
 *	RXGK_Level level;
 *	unsigned int epoch;
 *	unsigned int cid;
 *	unsigned int call_numbers<>;
 * };
 */
static int rxgk_do_verify_authenticator(struct rxrpc_connection *conn,
					const struct krb5_enctype *krb5,
					struct sk_buff *skb,
					__be32 *p, __be32 *end)
{
	u32 app_len, call_count, level, epoch, cid, i;

	_enter("");

	if (memcmp(p, conn->rxgk.nonce, 20) != 0)
		return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
					rxgk_abort_resp_bad_nonce);
	p += 20 / sizeof(__be32);

	app_len = ntohl(*p++);
	if (app_len > (end - p) * sizeof(__be32))
		return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
					rxgk_abort_resp_short_applen);

	p += xdr_round_up(app_len) / sizeof(__be32);
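	/* Note that p and end are __be32 pointers, so the bounds checks below
	 * count 32-bit XDR words rather than bytes.
	 */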
	if (end - p < 4)
		return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
					rxgk_abort_resp_short_applen);

	level = ntohl(*p++);
	epoch = ntohl(*p++);
	cid = ntohl(*p++);
	call_count = ntohl(*p++);

	if (level != conn->security_level ||
	    epoch != conn->proto.epoch ||
	    cid != conn->proto.cid ||
	    call_count > 4)
		return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
					rxgk_abort_resp_bad_param);

	if (end - p < call_count)
		return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
					rxgk_abort_resp_short_call_list);

	for (i = 0; i < call_count; i++) {
		u32 call_id = ntohl(*p++);

		if (call_id > INT_MAX)
			return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
						rxgk_abort_resp_bad_callid);

		if (call_id < conn->channels[i].call_counter)
			return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
						rxgk_abort_resp_call_ctr);

		if (call_id > conn->channels[i].call_counter) {
			if (conn->channels[i].call)
				return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
							rxgk_abort_resp_call_state);

			conn->channels[i].call_counter = call_id;
		}
	}

	_leave(" = 0");
	return 0;
}

/*
 * Extract the authenticator and verify it.
 */
static int rxgk_verify_authenticator(struct rxrpc_connection *conn,
				     const struct krb5_enctype *krb5,
				     struct sk_buff *skb,
				     unsigned int auth_offset, unsigned int auth_len)
{
	void *auth;
	__be32 *p;
	int ret;

	auth = kmalloc(auth_len, GFP_NOFS);
	if (!auth)
		return -ENOMEM;

	ret = skb_copy_bits(skb, auth_offset, auth, auth_len);
	if (ret < 0) {
		ret = rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
				       rxgk_abort_resp_short_auth);
		goto error;
	}

	p = auth;
	ret = rxgk_do_verify_authenticator(conn, krb5, skb, p, p + auth_len);
error:
	kfree(auth);
	return ret;
}

/*
 * Verify a response.
 *
 * struct RXGK_Response {
 *	rxgkTime start_time;
 *	RXGK_Data token;
 *	opaque authenticator<RXGK_MAXAUTHENTICATOR>
 * };
 */
static int rxgk_verify_response(struct rxrpc_connection *conn,
				struct sk_buff *skb)
{
	const struct krb5_enctype *krb5;
	struct rxrpc_key_token *token;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxgk_response rhdr;
	struct rxgk_context *gk;
	struct key *key = NULL;
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = skb->len - sizeof(struct rxrpc_wire_header);
	unsigned int token_offset, token_len;
	unsigned int auth_offset, auth_len;
	__be32 xauth_len;
	int ret, ec;

	_enter("{%d}", conn->debug_id);

	/* Parse the RXGK_Response object */
	if (sizeof(rhdr) + sizeof(__be32) > len)
		goto short_packet;

	if (skb_copy_bits(skb, offset, &rhdr, sizeof(rhdr)) < 0)
		goto short_packet;
	offset += sizeof(rhdr);
	len -= sizeof(rhdr);

	token_offset = offset;
	token_len = ntohl(rhdr.token_len);
	if (xdr_round_up(token_len) + sizeof(__be32) > len)
		goto short_packet;

	trace_rxrpc_rx_response(conn, sp->hdr.serial, 0, sp->hdr.cksum, token_len);

	offset += xdr_round_up(token_len);
	len -= xdr_round_up(token_len);

	if (skb_copy_bits(skb, offset, &xauth_len, sizeof(xauth_len)) < 0)
		goto short_packet;
	offset += sizeof(xauth_len);
	len -= sizeof(xauth_len);

	auth_offset = offset;
	auth_len = ntohl(xauth_len);
	if (auth_len < len)
		goto short_packet;
	if (auth_len & 3)
		goto inconsistent;
	if (auth_len < 20 + 9 * 4)
		goto auth_too_short;

	/* We need to extract and decrypt the token and instantiate a session
	 * key for it. This bit, however, is application-specific. If
	 * possible, we use a default parser, but we might end up bumping this
	 * to the app to deal with - which might mean a round trip to
	 * userspace.
	 */
	ret = rxgk_extract_token(conn, skb, token_offset, token_len, &key);
	if (ret < 0)
		goto out;

	/* We now have a key instantiated from the decrypted ticket. We can
	 * pass this to the application so that they can parse the ticket
	 * content and we can use the session key it contains to derive the
	 * keys we need.
	 *
	 * Note that we have to switch enctype at this point as the enctype of
	 * the ticket doesn't necessarily match that of the transport.
	 */
	token = key->payload.data[0];
	conn->security_level = token->rxgk->level;
	conn->rxgk.start_time = __be64_to_cpu(rhdr.start_time);

	gk = rxgk_generate_transport_key(conn, token->rxgk, sp->hdr.cksum, GFP_NOFS);
	if (IS_ERR(gk)) {
		ret = PTR_ERR(gk);
		goto cant_get_token;
	}

	krb5 = gk->krb5;

	trace_rxrpc_rx_response(conn, sp->hdr.serial, krb5->etype, sp->hdr.cksum, token_len);

	/* Decrypt, parse and verify the authenticator. */
	ret = rxgk_decrypt_skb(krb5, gk->resp_enc, skb,
			       &auth_offset, &auth_len, &ec);
	if (ret < 0) {
		rxrpc_abort_conn(conn, skb, RXGK_SEALEDINCON, ret,
				 rxgk_abort_resp_auth_dec);
		goto out;
	}

	ret = rxgk_verify_authenticator(conn, krb5, skb, auth_offset, auth_len);
	if (ret < 0)
		goto out;

	conn->key = key;
	key = NULL;
	ret = 0;
out:
	key_put(key);
	_leave(" = %d", ret);
	return ret;

inconsistent:
	ret = rxrpc_abort_conn(conn, skb, RXGK_INCONSISTENCY, -EPROTO,
			       rxgk_abort_resp_xdr_align);
	goto out;
auth_too_short:
	ret = rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
			       rxgk_abort_resp_short_auth);
	goto out;
short_packet:
	ret = rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
			       rxgk_abort_resp_short_packet);
	goto out;

cant_get_token:
	switch (ret) {
	case -ENOMEM:
		goto temporary_error;
	case -EINVAL:
		ret = rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EKEYREJECTED,
				       rxgk_abort_resp_internal_error);
		goto out;
	case -ENOPKG:
		ret = rxrpc_abort_conn(conn, skb, KRB5_PROG_KEYTYPE_NOSUPP,
				       -EKEYREJECTED, rxgk_abort_resp_nopkg);
		goto out;
	}

temporary_error:
	/* Ignore the response packet if we got a temporary error such as
	 * ENOMEM. We just want to send the challenge again. Note that we
	 * also come out this way if the ticket decryption fails.
	 */
	goto out;
}

/*
 * clear the connection security
 */
static void rxgk_clear(struct rxrpc_connection *conn)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(conn->rxgk.keys); i++)
		rxgk_put(conn->rxgk.keys[i]);
}

/*
 * Initialise the RxGK security service.
 */
static int rxgk_init(void)
{
	return 0;
}

/*
 * Clean up the RxGK security service.
 */
static void rxgk_exit(void)
{
}

/*
 * RxRPC YFS GSSAPI-based security
 */
const struct rxrpc_security rxgk_yfs = {
	.name = "yfs-rxgk",
	.security_index = RXRPC_SECURITY_YFS_RXGK,
	.no_key_abort = RXGK_NOTAUTH,
	.init = rxgk_init,
	.exit = rxgk_exit,
	.preparse_server_key = rxgk_preparse_server_key,
	.free_preparse_server_key = rxgk_free_preparse_server_key,
	.destroy_server_key = rxgk_destroy_server_key,
	.describe_server_key = rxgk_describe_server_key,
	.init_connection_security = rxgk_init_connection_security,
	.alloc_txbuf = rxgk_alloc_txbuf,
	.secure_packet = rxgk_secure_packet,
	.verify_packet = rxgk_verify_packet,
	.free_call_crypto = rxgk_free_call_crypto,
	.issue_challenge = rxgk_issue_challenge,
	.validate_challenge = rxgk_validate_challenge,
	.challenge_to_recvmsg = rxgk_challenge_to_recvmsg,
	.sendmsg_respond_to_challenge = rxgk_sendmsg_respond_to_challenge,
	.respond_to_challenge = rxgk_respond_to_challenge_no_appdata,
	.verify_response = rxgk_verify_response,
	.clear = rxgk_clear,
	.default_decode_ticket = rxgk_yfs_decode_ticket,
};
