// SPDX-License-Identifier: GPL-2.0
/*
 * net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption
 *
 * Copyright (c) 2019, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/rng.h>
#include "crypto.h"
#include "msg.h"
#include "bcast.h"

#define TIPC_TX_GRACE_PERIOD	msecs_to_jiffies(5000) /* 5s */
#define TIPC_TX_LASTING_TIME	msecs_to_jiffies(10000) /* 10s */
#define TIPC_RX_ACTIVE_LIM	msecs_to_jiffies(3000) /* 3s */
#define TIPC_RX_PASSIVE_LIM	msecs_to_jiffies(15000) /* 15s */

#define TIPC_MAX_TFMS_DEF	10
#define TIPC_MAX_TFMS_LIM	1000

#define TIPC_REKEYING_INTV_DEF	(60 * 24) /* default: 1 day */

/*
 * TIPC Key ids
 */
enum {
	KEY_MASTER = 0,
	KEY_MIN = KEY_MASTER,
	KEY_1 = 1,
	KEY_2,
	KEY_3,
	KEY_MAX = KEY_3,
};

/*
 * TIPC Crypto statistics
 */
enum {
	STAT_OK,
	STAT_NOK,
	STAT_ASYNC,
	STAT_ASYNC_OK,
	STAT_ASYNC_NOK,
	STAT_BADKEYS, /* tx only */
	STAT_BADMSGS = STAT_BADKEYS, /* rx only */
	STAT_NOKEYS,
	STAT_SWITCHES,

	MAX_STATS,
};

/* TIPC crypto statistics' header */
static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
					"async_nok", "badmsgs", "nokeys",
					"switches"};

/* Max TFMs number per key */
int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
/* Key exchange switch, default: on */
int sysctl_tipc_key_exchange_enabled __read_mostly = 1;

/*
 * struct tipc_key - TIPC keys' status indicator
 *
 *         7     6     5     4     3     2     1     0
 *      +-----+-----+-----+-----+-----+-----+-----+-----+
 * key: | (reserved)|passive idx| active idx|pending idx|
 *      +-----+-----+-----+-----+-----+-----+-----+-----+
 */
struct tipc_key {
#define KEY_BITS (2)
#define KEY_MASK ((1 << KEY_BITS) - 1)
	union {
		struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
			u8 pending:2,
			   active:2,
			   passive:2, /* rx only */
			   reserved:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
			u8 reserved:2,
			   passive:2, /* rx only */
			   active:2,
			   pending:2;
#else
#error "Please fix <asm/byteorder.h>"
#endif
		} __packed;
		u8 keys;
	};
};
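
/* Illustrative note (not part of the original file): the three 2-bit slot
 * indexes share the single "keys" octet, so e.g. passive = 3, active = 2,
 * pending = 1 is encoded as keys = (3 << 4) | (2 << 2) | 1 = 0x39;
 * tipc_crypto_key_set_state() below builds the octet exactly this way.
 */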

/**
 * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
 * @tfm: cipher handle/key
 * @list: linked list of TFMs
 */
struct tipc_tfm {
	struct crypto_aead *tfm;
	struct list_head list;
};

/**
 * struct tipc_aead - TIPC AEAD key structure
 * @tfm_entry: per-cpu pointer to one entry in TFM list
 * @crypto: TIPC crypto that owns this key
 * @cloned: reference to the source key in case of cloning
 * @users: the number of the key users (TX/RX)
 * @salt: the key's SALT value
 * @authsize: authentication tag size (max = 16)
 * @mode: crypto mode is applied to the key
 * @hint: a hint for user key
 * @rcu: struct rcu_head
 * @key: the aead key
 * @gen: the key's generation
 * @seqno: the key seqno (cluster scope)
 * @refcnt: the key reference counter
 */
struct tipc_aead {
#define TIPC_AEAD_HINT_LEN (5)
	struct tipc_tfm * __percpu *tfm_entry;
	struct tipc_crypto *crypto;
	struct tipc_aead *cloned;
	atomic_t users;
	u32 salt;
	u8 authsize;
	u8 mode;
	char hint[2 * TIPC_AEAD_HINT_LEN + 1];
	struct rcu_head rcu;
	struct tipc_aead_key *key;
	u16 gen;

	atomic64_t seqno ____cacheline_aligned;
	refcount_t refcnt ____cacheline_aligned;

} ____cacheline_aligned;

/**
 * struct tipc_crypto_stats - TIPC Crypto statistics
 * @stat: array of crypto statistics
 */
struct tipc_crypto_stats {
	unsigned int stat[MAX_STATS];
};

/**
 * struct tipc_crypto - TIPC TX/RX crypto structure
 * @net: struct net
 * @node: TIPC node (RX)
 * @aead: array of pointers to AEAD keys for encryption/decryption
 * @peer_rx_active: replicated peer RX active key index
 * @key_gen: TX/RX key generation
 * @key: the key states
 * @skey_mode: session key's mode
 * @skey: received session key
 * @wq: common workqueue on TX crypto
 * @work: delayed work sched for TX/RX
 * @key_distr: key distributing state
 * @rekeying_intv: rekeying interval (in minutes)
 * @stats: the crypto statistics
 * @name: the crypto name
 * @sndnxt: the per-peer sndnxt (TX)
 * @timer1: general timer 1 (jiffies)
 * @timer2: general timer 2 (jiffies)
 * @working: the crypto is working or not
 * @key_master: flag indicates if master key exists
 * @legacy_user: flag indicates if a peer joins w/o master key (for bwd comp.)
 * @nokey: no key indication
 * @flags: combined flags field
 * @lock: tipc_key lock
 */
struct tipc_crypto {
	struct net *net;
	struct tipc_node *node;
	struct tipc_aead __rcu *aead[KEY_MAX + 1];
	atomic_t peer_rx_active;
	u16 key_gen;
	struct tipc_key key;
	u8 skey_mode;
	struct tipc_aead_key *skey;
	struct workqueue_struct *wq;
	struct delayed_work work;
#define KEY_DISTR_SCHED		1
#define KEY_DISTR_COMPL		2
	atomic_t key_distr;
	u32 rekeying_intv;

	struct tipc_crypto_stats __percpu *stats;
	char name[48];

	atomic64_t sndnxt ____cacheline_aligned;
	unsigned long timer1;
	unsigned long timer2;
	union {
		struct {
			u8 working:1;
			u8 key_master:1;
			u8 legacy_user:1;
			u8 nokey: 1;
		};
		u8 flags;
	};
	spinlock_t lock; /* crypto lock */

} ____cacheline_aligned;

/* struct tipc_crypto_tx_ctx - TX context for callbacks */
struct tipc_crypto_tx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
	struct tipc_media_addr dst;
};

/* struct tipc_crypto_rx_ctx - RX context for callbacks */
struct tipc_crypto_rx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
};

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead);
static inline void tipc_aead_put(struct tipc_aead *aead);
static void tipc_aead_free(struct rcu_head *rp);
static int tipc_aead_users(struct tipc_aead __rcu *aead);
static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val);
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead);
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode);
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src);
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg);
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode);
static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err);
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b);
static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err);
static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr);
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx);
static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending);
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos,
				  bool master_key);
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb,
						 u8 tx_key);
static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
					 struct tipc_bearer *b,
					 struct tipc_media_addr *dst,
					 struct tipc_node *__dnode, u8 type);
static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err);
static void tipc_crypto_do_cmd(struct net *net, int cmd);
static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf);
static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
				u16 gen, u8 mode, u32 dnode);
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
static void tipc_crypto_work_tx(struct work_struct *work);
static void tipc_crypto_work_rx(struct work_struct *work);
static int tipc_aead_key_generate(struct tipc_aead_key *skey);

#define is_tx(crypto) (!(crypto)->node)
#define is_rx(crypto) (!is_tx(crypto))

#define key_next(cur) ((cur) % KEY_MAX + 1)
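
/* Illustrative note (not part of the original file): key_next() cycles only
 * through the non-master slots, e.g. key_next(KEY_1) == KEY_2 and
 * key_next(KEY_3) == KEY_1, so KEY_MASTER (0) is never returned.
 */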

#define tipc_aead_rcu_ptr(rcu_ptr, lock)				\
	rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))

#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock)			\
do {									\
	struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr),	\
						lockdep_is_held(lock));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	tipc_aead_put(__tmp);						\
} while (0)

#define tipc_crypto_key_detach(rcu_ptr, lock)				\
	tipc_aead_rcu_replace((rcu_ptr), NULL, lock)

/**
 * tipc_aead_key_validate - Validate an AEAD user key
 * @ukey: pointer to user key data
 * @info: netlink info pointer
 */
int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
{
	int keylen;

	/* Check if algorithm exists */
	if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
		GENL_SET_ERR_MSG(info, "unable to load the algorithm (module existed?)");
		return -ENODEV;
	}

	/* Currently, we only support the "gcm(aes)" cipher algorithm */
	if (strcmp(ukey->alg_name, "gcm(aes)")) {
		GENL_SET_ERR_MSG(info, "the algorithm is not supported yet");
		return -ENOTSUPP;
	}

	/* Check if key size is correct */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
	if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_256)) {
		GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
		return -EKEYREJECTED;
	}

	return 0;
}
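
/* Illustrative note (not part of the original file): the user key blob is
 * laid out as [AES key][4-octet salt], so with AES-128 the accepted keylen is
 * 16 + 4 = 20 octets, and 28 or 36 octets for AES-192/256 respectively.
 */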

/**
 * tipc_aead_key_generate - Generate new session key
 * @skey: input/output key with new content
 *
 * Return: 0 in case of success, otherwise < 0
 */
static int tipc_aead_key_generate(struct tipc_aead_key *skey)
{
	int rc = 0;

	/* Fill the key's content with a random value via RNG cipher */
	rc = crypto_get_default_rng();
	if (likely(!rc)) {
		rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
					  skey->keylen);
		crypto_put_default_rng();
	}

	return rc;
}

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
		tmp = NULL;
	rcu_read_unlock();

	return tmp;
}

static inline void tipc_aead_put(struct tipc_aead *aead)
{
	if (aead && refcount_dec_and_test(&aead->refcnt))
		call_rcu(&aead->rcu, tipc_aead_free);
}

/**
 * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
 * @rp: rcu head pointer
 */
static void tipc_aead_free(struct rcu_head *rp)
{
	struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
	struct tipc_tfm *tfm_entry, *head, *tmp;

	if (aead->cloned) {
		tipc_aead_put(aead->cloned);
	} else {
		head = *get_cpu_ptr(aead->tfm_entry);
		put_cpu_ptr(aead->tfm_entry);
		list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
			crypto_free_aead(tfm_entry->tfm);
			list_del(&tfm_entry->list);
			kfree(tfm_entry);
		}
		/* Free the head */
		crypto_free_aead(head->tfm);
		list_del(&head->list);
		kfree(head);
	}
	free_percpu(aead->tfm_entry);
	kfree_sensitive(aead->key);
	kfree(aead);
}

static int tipc_aead_users(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;
	int users = 0;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		users = atomic_read(&tmp->users);
	rcu_read_unlock();

	return users;
}

static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, 1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, -1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
{
	struct tipc_aead *tmp;
	int cur;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp) {
		do {
			cur = atomic_read(&tmp->users);
			if (cur == val)
				break;
		} while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
	}
	rcu_read_unlock();
}

/**
 * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
 * @aead: the AEAD key pointer
 */
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
{
	struct tipc_tfm **tfm_entry;
	struct crypto_aead *tfm;

	tfm_entry = get_cpu_ptr(aead->tfm_entry);
	*tfm_entry = list_next_entry(*tfm_entry, list);
	tfm = (*tfm_entry)->tfm;
	put_cpu_ptr(tfm_entry);

	return tfm;
}

/**
 * tipc_aead_init - Initiate TIPC AEAD
 * @aead: returned new TIPC AEAD key handle pointer
 * @ukey: pointer to user key data
 * @mode: the key mode
 *
 * Allocate a (list of) new cipher transformation (TFM) with the specific user
 * key data if valid. The number of the allocated TFMs can be set via the sysfs
 * "net/tipc/max_tfms" first.
 * All the other AEAD data are also initialized.
 *
 * Return: 0 if the initiation is successful, otherwise: < 0
 */
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode)
{
	struct tipc_tfm *tfm_entry, *head;
	struct crypto_aead *tfm;
	struct tipc_aead *tmp;
	int keylen, err, cpu;
	int tfm_cnt = 0;

	if (unlikely(*aead))
		return -EEXIST;

	/* Allocate a new AEAD */
	tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
	if (unlikely(!tmp))
		return -ENOMEM;

	/* The key consists of two parts: [AES-KEY][SALT] */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;

	/* Allocate per-cpu TFM entry pointer */
	tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
	if (!tmp->tfm_entry) {
		kfree_sensitive(tmp);
		return -ENOMEM;
	}

	/* Make a list of TFMs with the user key data */
	do {
		tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
		if (IS_ERR(tfm)) {
			err = PTR_ERR(tfm);
			break;
		}

		if (unlikely(!tfm_cnt &&
			     crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
			crypto_free_aead(tfm);
			err = -ENOTSUPP;
			break;
		}

		err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
		err |= crypto_aead_setkey(tfm, ukey->key, keylen);
		if (unlikely(err)) {
			crypto_free_aead(tfm);
			break;
		}

		tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
		if (unlikely(!tfm_entry)) {
			crypto_free_aead(tfm);
			err = -ENOMEM;
			break;
		}
		INIT_LIST_HEAD(&tfm_entry->list);
		tfm_entry->tfm = tfm;

		/* First entry? */
		if (!tfm_cnt) {
			head = tfm_entry;
			for_each_possible_cpu(cpu) {
				*per_cpu_ptr(tmp->tfm_entry, cpu) = head;
			}
		} else {
			list_add_tail(&tfm_entry->list, &head->list);
		}

	} while (++tfm_cnt < sysctl_tipc_max_tfms);

	/* Not any TFM is allocated? */
	if (!tfm_cnt) {
		free_percpu(tmp->tfm_entry);
		kfree_sensitive(tmp);
		return err;
	}

	/* Form a hex string of some last bytes as the key's hint */
	bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
		TIPC_AEAD_HINT_LEN);

	/* Initialize the other data */
	tmp->mode = mode;
	tmp->cloned = NULL;
	tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
	tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
	memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
	atomic_set(&tmp->users, 0);
	atomic64_set(&tmp->seqno, 0);
	refcount_set(&tmp->refcnt, 1);

	*aead = tmp;
	return 0;
}

/**
 * tipc_aead_clone - Clone a TIPC AEAD key
 * @dst: dest key for the cloning
 * @src: source key to clone from
 *
 * Make a "copy" of the source AEAD key data to the dest, the TFMs list is
 * common for the keys.
 * A reference to the source is held in the "cloned" pointer for the later
 * freeing purposes.
 *
 * Note: this must be done in cluster-key mode only!
 * Return: 0 in case of success, otherwise < 0
 */
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src)
{
	struct tipc_aead *aead;
	int cpu;

	if (!src)
		return -ENOKEY;

	if (src->mode != CLUSTER_KEY)
		return -EINVAL;

	if (unlikely(*dst))
		return -EEXIST;

	aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
	if (unlikely(!aead))
		return -ENOMEM;

	aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
	if (unlikely(!aead->tfm_entry)) {
		kfree_sensitive(aead);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		*per_cpu_ptr(aead->tfm_entry, cpu) =
				*per_cpu_ptr(src->tfm_entry, cpu);
	}

	memcpy(aead->hint, src->hint, sizeof(src->hint));
	aead->mode = src->mode;
	aead->salt = src->salt;
	aead->authsize = src->authsize;
	atomic_set(&aead->users, 0);
	atomic64_set(&aead->seqno, 0);
	refcount_set(&aead->refcnt, 1);

	WARN_ON(!refcount_inc_not_zero(&src->refcnt));
	aead->cloned = src;

	*dst = aead;
	return 0;
}

/**
 * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
 * @tfm: cipher handle to be registered with the request
 * @crypto_ctx_size: size of crypto context for callback
 * @iv: returned pointer to IV data
 * @req: returned pointer to AEAD request data
 * @sg: returned pointer to SG lists
 * @nsg: number of SG lists to be allocated
 *
 * Allocate memory to store the crypto context data, AEAD request, IV and SG
 * lists, the memory layout is as follows:
 * crypto_ctx || iv || aead_req || sg[]
 *
 * Return: the pointer to the memory areas in case of success, otherwise NULL
 */
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg)
{
	unsigned int iv_size, req_size;
	unsigned int len;
	u8 *mem;

	iv_size = crypto_aead_ivsize(tfm);
	req_size = sizeof(**req) + crypto_aead_reqsize(tfm);

	len = crypto_ctx_size;
	len += iv_size;
	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
	len = ALIGN(len, crypto_tfm_ctx_alignment());
	len += req_size;
	len = ALIGN(len, __alignof__(struct scatterlist));
	len += nsg * sizeof(**sg);

	mem = kmalloc(len, GFP_ATOMIC);
	if (!mem)
		return NULL;

	*iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size,
			      crypto_aead_alignmask(tfm) + 1);
	*req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
						crypto_tfm_ctx_alignment());
	*sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
					      __alignof__(struct scatterlist));

	return (void *)mem;
}
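
/* Illustrative note (not part of the original file): the callers below build
 * a 12-octet IV for "gcm(aes)" ([4-octet salt][8-octet seqno]), so a
 * single-sg allocation is roughly laid out as:
 *
 *	[crypto_ctx][12-octet iv][aead_request + tfm reqsize][struct scatterlist]
 *
 * with each area aligned as computed above.
 */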

/**
 * tipc_aead_encrypt - Encrypt a message
 * @aead: TIPC AEAD key for the message encryption
 * @skb: the input/output skb
 * @b: TIPC bearer where the message will be delivered after the encryption
 * @dst: the destination media address
 * @__dnode: TIPC dest node if "known"
 *
 * Return:
 * * 0                   : if the encryption has completed
 * * -EINPROGRESS/-EBUSY : if a callback will be performed
 * * < 0                 : the encryption has failed
 */
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode)
{
	struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
	struct tipc_crypto_tx_ctx *tx_ctx;
	struct aead_request *req;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, len, tailen, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

	/* Make sure message len at least 4-byte aligned */
	len = ALIGN(skb->len, 4);
	tailen = len - skb->len + aead->authsize;

	/* Expand skb tail for authentication tag:
	 * For simplicity, we'd have made sure the skb has enough tailroom for
	 * the authentication tag at skb allocation. Even when the skb is
	 * nonlinear but there is no frag_list, it should still be fine!
	 * Otherwise, we must cow it to be a writable buffer with the tailroom.
	 */
	SKB_LINEAR_ASSERT(skb);
	if (tailen > skb_tailroom(skb)) {
		pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n",
			 skb_tailroom(skb), tailen);
	}

	if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) {
		nsg = 1;
		trailer = skb;
	} else {
		/* TODO: We could avoid skb_cow_data() if skb has no frag_list
		 * e.g. by skb_fill_page_desc() to add another page to the skb
		 * with the wanted tailen... However, page skbs don't appear
		 * often, so take it easy now!
		 * For cloned skbs e.g. from link_xmit(), there seems to be no
		 * choice though :(
		 */
		nsg = skb_cow_data(skb, tailen, &trailer);
		if (unlikely(nsg < 0)) {
			pr_err("TX: skb_cow_data() returned %d\n", nsg);
			return nsg;
		}
	}

	pskb_put(skb, trailer, tailen);

	/* Allocate memory for the AEAD operation */
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
		goto exit;
	}

	/* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)]
	 * In case we're in cluster-key mode, SALT is varied by xor-ing with
	 * the source address (or w0 of id), otherwise with the dest address
	 * if dest is known.
	 */
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= __be32_to_cpu(ehdr->addr);
	else if (__dnode)
		salt ^= tipc_node_get_addr(__dnode);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	/* Prepare request */
	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_encrypt_done, skb);
	tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
	tx_ctx->aead = aead;
	tx_ctx->bearer = b;
	memcpy(&tx_ctx->dst, dst, sizeof(*dst));

	/* Hold bearer */
	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	/* Now, do encrypt */
	rc = crypto_aead_encrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}

static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = tx_ctx->bearer;
	struct tipc_aead *aead = tx_ctx->aead;
	struct tipc_crypto *tx = aead->crypto;
	struct net *net = tx->net;

	switch (err) {
	case 0:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
		rcu_read_lock();
		if (likely(test_bit(0, &b->up)))
			b->media->send_msg(net, skb, b, &tx_ctx->dst);
		else
			kfree_skb(skb);
		rcu_read_unlock();
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]);
		kfree_skb(skb);
		break;
	}

	kfree(tx_ctx);
	tipc_bearer_put(b);
	tipc_aead_put(aead);
}

/**
 * tipc_aead_decrypt - Decrypt an encrypted message
 * @net: struct net
 * @aead: TIPC AEAD for the message decryption
 * @skb: the input/output skb
 * @b: TIPC bearer where the message has been received
 *
 * Return:
 * * 0                   : if the decryption has completed
 * * -EINPROGRESS/-EBUSY : if a callback will be performed
 * * < 0                 : the decryption has failed
 */
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b)
{
	struct tipc_crypto_rx_ctx *rx_ctx;
	struct aead_request *req;
	struct crypto_aead *tfm;
	struct sk_buff *unused;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

	if (unlikely(!aead))
		return -ENOKEY;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		pr_err("RX: skb_cow_data() returned %d\n", nsg);
		return nsg;
	}

	/* Allocate memory for the AEAD operation */
	tfm = tipc_aead_tfm_next(aead);
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
		goto exit;
	}

	/* Reconstruct IV: */
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= __be32_to_cpu(ehdr->addr);
	else if (ehdr->destined)
		salt ^= tipc_own_addr(net);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	/* Prepare request */
	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_decrypt_done, skb);
	rx_ctx = (struct tipc_crypto_rx_ctx *)ctx;
	rx_ctx->aead = aead;
	rx_ctx->bearer = b;

	/* Hold bearer */
	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	/* Now, do decrypt */
	rc = crypto_aead_decrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}

static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = rx_ctx->bearer;
	struct tipc_aead *aead = rx_ctx->aead;
	struct tipc_crypto_stats __percpu *stats = aead->crypto->stats;
	struct net *net = aead->crypto->net;

	switch (err) {
	case 0:
		this_cpu_inc(stats->stat[STAT_ASYNC_OK]);
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(stats->stat[STAT_ASYNC_NOK]);
		break;
	}

	kfree(rx_ctx);
	tipc_crypto_rcv_complete(net, aead, b, &skb, err);
	if (likely(skb)) {
		if (likely(test_bit(0, &b->up)))
			tipc_rcv(net, skb, b);
		else
			kfree_skb(skb);
	}

	tipc_bearer_put(b);
}

static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
{
	return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
}

/**
 * tipc_ehdr_validate - Validate an encryption message
 * @skb: the message buffer
 *
 * Return: "true" if this is a valid encryption message, otherwise "false"
 */
bool tipc_ehdr_validate(struct sk_buff *skb)
{
	struct tipc_ehdr *ehdr;
	int ehsz;

	if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE)))
		return false;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (unlikely(ehdr->version != TIPC_EVERSION))
		return false;
	ehsz = tipc_ehdr_size(ehdr);
	if (unlikely(!pskb_may_pull(skb, ehsz)))
		return false;
	if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
		return false;

	return true;
}

/**
 * tipc_ehdr_build - Build TIPC encryption message header
 * @net: struct net
 * @aead: TX AEAD key to be used for the message encryption
 * @tx_key: key id used for the message encryption
 * @skb: input/output message skb
 * @__rx: RX crypto handle if dest is "known"
 *
 * Return: the header size if the building is successful, otherwise < 0
 */
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_ehdr *ehdr;
	u32 user = msg_user(hdr);
	u64 seqno;
	int ehsz;

	/* Make room for encryption header */
	ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
	WARN_ON(skb_headroom(skb) < ehsz);
	ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);

	/* Obtain a seqno first:
	 * Use the key seqno (= cluster wise) if dest is unknown or we're in
	 * cluster key mode, otherwise it's better for a per-peer seqno!
	 */
	if (!__rx || aead->mode == CLUSTER_KEY)
		seqno = atomic64_inc_return(&aead->seqno);
	else
		seqno = atomic64_inc_return(&__rx->sndnxt);

	/* Revoke the key if seqno is wrapped around */
	if (unlikely(!seqno))
		return tipc_crypto_key_revoke(net, tx_key);

	/* Word 1-2 */
	ehdr->seqno = cpu_to_be64(seqno);

	/* Words 0, 3- */
	ehdr->version = TIPC_EVERSION;
	ehdr->user = 0;
	ehdr->keepalive = 0;
	ehdr->tx_key = tx_key;
	ehdr->destined = (__rx) ? 1 : 0;
	ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
	ehdr->rx_nokey = (__rx) ? __rx->nokey : 0;
	ehdr->master_key = aead->crypto->key_master;
	ehdr->reserved_1 = 0;
	ehdr->reserved_2 = 0;

	switch (user) {
	case LINK_CONFIG:
		ehdr->user = LINK_CONFIG;
		memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
		break;
	default:
		if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
			ehdr->user = LINK_PROTOCOL;
			ehdr->keepalive = msg_is_keepalive(hdr);
		}
		ehdr->addr = hdr->hdr[3];
		break;
	}

	return ehsz;
}

static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending)
{
	struct tipc_key old = c->key;
	char buf[32];

	c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
		      ((new_active & KEY_MASK) << (KEY_BITS)) |
		      ((new_pending & KEY_MASK));

	pr_debug("%s: key changing %s ::%pS\n", c->name,
		 tipc_key_change_dump(old, c->key, buf),
		 __builtin_return_address(0));
}

/**
 * tipc_crypto_key_init - Initiate a new user / AEAD key
 * @c: TIPC crypto to which new key is attached
 * @ukey: the user key
 * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
 * @master_key: specify this is a cluster master key
 *
 * A new TIPC AEAD key will be allocated and initiated with the specified user
 * key, then attached to the TIPC crypto.
 *
 * Return: new key id in case of success, otherwise: < 0
 */
int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
			 u8 mode, bool master_key)
{
	struct tipc_aead *aead = NULL;
	int rc = 0;

	/* Initiate with the new user key */
	rc = tipc_aead_init(&aead, ukey, mode);

	/* Attach it to the crypto */
	if (likely(!rc)) {
		rc = tipc_crypto_key_attach(c, aead, 0, master_key);
		if (rc < 0)
			tipc_aead_free(&aead->rcu);
	}

	return rc;
}
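
/* Illustrative usage sketch (not part of the original file): a validated user
 * key is typically plugged in roughly as follows, where "c" is either the TX
 * crypto or a peer's RX crypto handle:
 *
 *	if (!tipc_aead_key_validate(ukey, info))
 *		rc = tipc_crypto_key_init(c, ukey, PER_NODE_KEY, false);
 *	// rc >= 0: the new key id, rc < 0: error
 */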

/**
 * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
 * @c: TIPC crypto to which the new AEAD key is attached
 * @aead: the new AEAD key pointer
 * @pos: desired slot in the crypto key array, = 0 if any!
 * @master_key: specify this is a cluster master key
 *
 * Return: new key id in case of success, otherwise: -EBUSY
 */
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos,
				  bool master_key)
{
	struct tipc_key key;
	int rc = -EBUSY;
	u8 new_key;

	spin_lock_bh(&c->lock);
	key = c->key;
	if (master_key) {
		new_key = KEY_MASTER;
		goto attach;
	}
	if (key.active && key.passive)
		goto exit;
	if (key.pending) {
		if (tipc_aead_users(c->aead[key.pending]) > 0)
			goto exit;
		/* if (pos): ok with replacing, will be aligned when needed */
		/* Replace it */
		new_key = key.pending;
	} else {
		if (pos) {
			if (key.active && pos != key_next(key.active)) {
				key.passive = pos;
				new_key = pos;
				goto attach;
			} else if (!key.active && !key.passive) {
				key.pending = pos;
				new_key = pos;
				goto attach;
			}
		}
		key.pending = key_next(key.active ?: key.passive);
		new_key = key.pending;
	}

attach:
	aead->crypto = c;
	aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen;
	tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
	if (likely(c->key.keys != key.keys))
		tipc_crypto_key_set_state(c, key.passive, key.active,
					  key.pending);
	c->working = 1;
	c->nokey = 0;
	c->key_master |= master_key;
	rc = new_key;

exit:
	spin_unlock_bh(&c->lock);
	return rc;
}

void tipc_crypto_key_flush(struct tipc_crypto *c)
{
	struct tipc_crypto *tx, *rx;
	int k;

	spin_lock_bh(&c->lock);
	if (is_rx(c)) {
		/* Try to cancel pending work */
		rx = c;
		tx = tipc_net(rx->net)->crypto_tx;
		if (cancel_delayed_work(&rx->work)) {
			kfree(rx->skey);
			rx->skey = NULL;
			atomic_xchg(&rx->key_distr, 0);
			tipc_node_put(rx->node);
		}
		/* RX stopping => decrease TX key users if any */
		k = atomic_xchg(&rx->peer_rx_active, 0);
		if (k) {
			tipc_aead_users_dec(tx->aead[k], 0);
			/* Mark the point TX key users changed */
			tx->timer1 = jiffies;
		}
	}

	c->flags = 0;
	tipc_crypto_key_set_state(c, 0, 0, 0);
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_crypto_key_detach(c->aead[k], &c->lock);
	atomic64_set(&c->sndnxt, 0);
	spin_unlock_bh(&c->lock);
}

/**
 * tipc_crypto_key_try_align - Align RX keys if possible
 * @rx: RX crypto handle
 * @new_pending: new pending slot if aligned (= TX key from peer)
 *
 * Peer has used an unknown key slot, this only happens when peer has left and
 * rejoined, or we are a newcomer.
 * That means, there must be no active key but a pending key at unaligned slot.
 * If so, we try to move the pending key to the new slot.
 * Note: A potential passive key can exist, it will be shifted correspondingly!
 *
 * Return: "true" if key is successfully aligned, otherwise "false"
 */
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
{
	struct tipc_aead *tmp1, *tmp2 = NULL;
	struct tipc_key key;
	bool aligned = false;
	u8 new_passive = 0;
	int x;

	spin_lock(&rx->lock);
	key = rx->key;
	if (key.pending == new_pending) {
		aligned = true;
		goto exit;
	}
	if (key.active)
		goto exit;
	if (!key.pending)
		goto exit;
	if (tipc_aead_users(rx->aead[key.pending]) > 0)
		goto exit;

	/* Try to "isolate" this pending key first */
	tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
	if (!refcount_dec_if_one(&tmp1->refcnt))
		goto exit;
	rcu_assign_pointer(rx->aead[key.pending], NULL);

	/* Move passive key if any */
	if (key.passive) {
		tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock));
		x = (key.passive - key.pending + new_pending) % KEY_MAX;
		new_passive = (x <= 0) ? x + KEY_MAX : x;
	}

	/* Re-allocate the key(s) */
	tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
	rcu_assign_pointer(rx->aead[new_pending], tmp1);
	if (new_passive)
		rcu_assign_pointer(rx->aead[new_passive], tmp2);
	refcount_set(&tmp1->refcnt, 1);
	aligned = true;
	pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
			    new_pending);

exit:
	spin_unlock(&rx->lock);
	return aligned;
}

/**
 * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
 * @tx: TX crypto handle
 * @rx: RX crypto handle (can be NULL)
 * @skb: the message skb which will be decrypted later
 * @tx_key: peer TX key id
 *
 * This function looks up the existing TX keys and picks one which is suitable
 * for the message decryption, that must be a cluster key and not used before
 * on the same message (i.e. recursive).
 *
 * Return: the TX AEAD key handle in case of success, otherwise NULL
 */
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb,
						 u8 tx_key)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
	struct tipc_aead *aead = NULL;
	struct tipc_key key = tx->key;
	u8 k, i = 0;

	/* Initialize data if not yet */
	if (!skb_cb->tx_clone_deferred) {
		skb_cb->tx_clone_deferred = 1;
		memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
	}

	skb_cb->tx_clone_ctx.rx = rx;
	if (++skb_cb->tx_clone_ctx.recurs > 2)
		return NULL;

	/* Pick one TX key */
	spin_lock(&tx->lock);
	if (tx_key == KEY_MASTER) {
		aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock);
		goto done;
	}
	do {
		k = (i == 0) ? key.pending :
			((i == 1) ? key.active : key.passive);
		if (!k)
			continue;
		aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock);
		if (!aead)
			continue;
		if (aead->mode != CLUSTER_KEY ||
		    aead == skb_cb->tx_clone_ctx.last) {
			aead = NULL;
			continue;
		}
		/* Ok, found one cluster key */
		skb_cb->tx_clone_ctx.last = aead;
		WARN_ON(skb->next);
		skb->next = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!skb->next))
			pr_warn("Failed to clone skb for next round if any\n");
		break;
	} while (++i < 3);

done:
	if (likely(aead))
		WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
	spin_unlock(&tx->lock);

	return aead;
}

/**
 * tipc_crypto_key_synch: Synch own key data according to peer key status
 * @rx: RX crypto handle
 * @skb: TIPCv2 message buffer (incl. the ehdr from peer)
 *
 * This function updates the peer node related data as the peer RX active key
 * has changed, so the number of TX keys' users on this node is increased and
 * decreased correspondingly.
 *
 * It also considers the case where the peer has no key: our own master key
 * (if any) then takes over, i.e. the grace period starts and the key
 * distributing process is triggered as well.
 *
 * The "per-peer" sndnxt is also reset when the peer key has switched.
 */
static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb)
{
	struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb);
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_msg *hdr = buf_msg(skb);
	u32 self = tipc_own_addr(rx->net);
	u8 cur, new;
	unsigned long delay;

	/* Update RX 'key_master' flag according to peer, also mark "legacy" if
	 * a peer has no master key.
	 */
	rx->key_master = ehdr->master_key;
	if (!rx->key_master)
		tx->legacy_user = 1;

	/* For later cases, apply only if message is destined to this node */
	if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self)
		return;

	/* Case 1: Peer has no keys, let's make master key take over */
	if (ehdr->rx_nokey) {
		/* Set or extend grace period */
		tx->timer2 = jiffies;
		/* Schedule key distributing for the peer if not yet */
		if (tx->key.keys &&
		    !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) {
			get_random_bytes(&delay, 2);
			delay %= 5;
			delay = msecs_to_jiffies(500 * ++delay);
			if (queue_delayed_work(tx->wq, &rx->work, delay))
				tipc_node_get(rx->node);
		}
	} else {
		/* Cancel a pending key distributing if any */
		atomic_xchg(&rx->key_distr, 0);
	}

	/* Case 2: Peer RX active key has changed, let's update own TX users */
	cur = atomic_read(&rx->peer_rx_active);
	new = ehdr->rx_key_active;
	if (tx->key.keys &&
	    cur != new &&
	    atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
		if (new)
			tipc_aead_users_inc(tx->aead[new], INT_MAX);
		if (cur)
			tipc_aead_users_dec(tx->aead[cur], 0);

		atomic64_set(&rx->sndnxt, 0);
		/* Mark the point TX key users changed */
		tx->timer1 = jiffies;

		pr_debug("%s: key users changed %d-- %d++, peer %s\n",
			 tx->name, cur, new, rx->name);
	}
}

static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
{
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_key key;

	spin_lock(&tx->lock);
	key = tx->key;
	WARN_ON(!key.active || tx_key != key.active);

	/* Free the active key */
	tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
	tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	spin_unlock(&tx->lock);

	pr_warn("%s: key is revoked\n", tx->name);
	return -EKEYREVOKED;
}

int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
		      struct tipc_node *node)
{
	struct tipc_crypto *c;

	if (*crypto)
		return -EEXIST;

	/* Allocate crypto */
	c = kzalloc(sizeof(*c), GFP_ATOMIC);
	if (!c)
		return -ENOMEM;

	/* Allocate workqueue on TX */
	if (!node) {
		c->wq = alloc_ordered_workqueue("tipc_crypto", 0);
		if (!c->wq) {
			kfree(c);
			return -ENOMEM;
		}
	}

	/* Allocate statistic structure */
	c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
	if (!c->stats) {
		if (c->wq)
			destroy_workqueue(c->wq);
		kfree_sensitive(c);
		return -ENOMEM;
	}

	c->flags = 0;
	c->net = net;
	c->node = node;
	get_random_bytes(&c->key_gen, 2);
	tipc_crypto_key_set_state(c, 0, 0, 0);
	atomic_set(&c->key_distr, 0);
	atomic_set(&c->peer_rx_active, 0);
	atomic64_set(&c->sndnxt, 0);
	c->timer1 = jiffies;
	c->timer2 = jiffies;
	c->rekeying_intv = TIPC_REKEYING_INTV_DEF;
	spin_lock_init(&c->lock);
	scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX",
		  (is_rx(c)) ? tipc_node_get_id_str(c->node) :
			       tipc_own_id_string(c->net));

	if (is_rx(c))
		INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx);
	else
		INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx);

	*crypto = c;
	return 0;
}

void tipc_crypto_stop(struct tipc_crypto **crypto)
{
	struct tipc_crypto *c = *crypto;
	u8 k;

	if (!c)
		return;

	/* Flush any queued works & destroy wq */
	if (is_tx(c)) {
		c->rekeying_intv = 0;
		cancel_delayed_work_sync(&c->work);
		destroy_workqueue(c->wq);
	}

	/* Release AEAD keys */
	rcu_read_lock();
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_aead_put(rcu_dereference(c->aead[k]));
	rcu_read_unlock();
	pr_debug("%s: has been stopped\n", c->name);

	/* Free this crypto statistics */
	free_percpu(c->stats);

	*crypto = NULL;
	kfree_sensitive(c);
}

void tipc_crypto_timeout(struct tipc_crypto *rx)
{
	struct tipc_net *tn = tipc_net(rx->net);
	struct tipc_crypto *tx = tn->crypto_tx;
	struct tipc_key key;
	int cmd;

	/* TX pending: taking all users & stable -> active */
	spin_lock(&tx->lock);
	key = tx->key;
	if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
		goto s1;
	if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
		goto s1;
	if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME))
		goto s1;

	tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
	if (key.active)
		tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", tx->name, key.pending);

s1:
	spin_unlock(&tx->lock);

	/* RX pending: having user -> active */
	spin_lock(&rx->lock);
	key = rx->key;
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
		goto s2;

	if (key.active)
		key.passive = key.active;
	key.active = key.pending;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", rx->name, key.pending);
	goto s5;

s2:
	/* RX pending: not working -> remove */
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10)
		goto s3;

	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock);
	pr_debug("%s: key[%d] is removed\n", rx->name, key.pending);
	goto s5;

s3:
	/* RX active: timed out or no user -> pending */
	if (!key.active)
		goto s4;
	if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.active]) > 0)
		goto s4;

	if (key.pending)
		key.passive = key.active;
	else
		key.pending = key.active;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, 0, key.pending);
	tipc_aead_users_set(rx->aead[key.pending], 0);
	pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active);
	goto s5;

s4:
	/* RX passive: outdated or not working -> free */
	if (!key.passive)
		goto s5;
	if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.passive]) > -10)
		goto s5;

	tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
	tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
	pr_debug("%s: key[%d] is freed\n", rx->name, key.passive);

s5:
	spin_unlock(&rx->lock);

	/* Relax it here, the flag will be set again if it really is, but only
	 * when we are not in grace period for safety!
	 */
	if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD))
		tx->legacy_user = 0;

	/* Limit max_tfms & do debug commands if needed */
	if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
		return;

	cmd = sysctl_tipc_max_tfms;
	sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
	tipc_crypto_do_cmd(rx->net, cmd);
}

static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
					 struct tipc_bearer *b,
					 struct tipc_media_addr *dst,
					 struct tipc_node *__dnode, u8 type)
{
	struct sk_buff *skb;

	skb = skb_clone(_skb, GFP_ATOMIC);
	if (skb) {
		TIPC_SKB_CB(skb)->xmit_type = type;
		tipc_crypto_xmit(net, &skb, b, dst, __dnode);
		if (skb)
			b->media->send_msg(net, skb, b, dst);
	}
}

/**
 * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
 * @net: struct net
 * @skb: input/output message skb pointer
 * @b: bearer used for xmit later
 * @dst: destination media address
 * @__dnode: destination node for reference if any
 *
 * First, build an encryption message header on the top of the message, then
 * encrypt the original TIPC message by using the pending, master or active
 * key with this preference order.
 * If the encryption is successful, the encrypted skb is returned directly or
 * via the callback.
 * Otherwise, the skb is freed!
 *
 * Return:
 * * 0                   : the encryption has succeeded (or no encryption)
 * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
 * * -ENOKEY             : the encryption has failed due to no key
 * * -EKEYREVOKED        : the encryption has failed due to key revoked
 * * -ENOMEM             : the encryption has failed due to no memory
 * * < 0                 : the encryption has failed due to other reasons
 */
int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dst,
		     struct tipc_node *__dnode)
{
	struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats = tx->stats;
	struct tipc_msg *hdr = buf_msg(*skb);
	struct tipc_key key = tx->key;
	struct tipc_aead *aead = NULL;
	u32 user = msg_user(hdr);
	u32 type = msg_type(hdr);
	int rc = -ENOKEY;
	u8 tx_key = 0;

	/* No encryption? */
	if (!tx->working)
		return 0;

	/* Pending key if peer has active on it or probing time */
	if (unlikely(key.pending)) {
		tx_key = key.pending;
		if (!tx->key_master && !key.active)
			goto encrypt;
		if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
			goto encrypt;
		if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) {
			pr_debug("%s: probing for key[%d]\n", tx->name,
				 key.pending);
			goto encrypt;
		}
		if (user == LINK_CONFIG || user == LINK_PROTOCOL)
			tipc_crypto_clone_msg(net, *skb, b, dst, __dnode,
					      SKB_PROBING);
	}

	/* Master key if this is a *vital* message or in grace period */
	if (tx->key_master) {
		tx_key = KEY_MASTER;
		if (!key.active)
			goto encrypt;
		if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) {
			pr_debug("%s: gracing for msg (%d %d)\n", tx->name,
				 user, type);
			goto encrypt;
		}
		if (user == LINK_CONFIG ||
		    (user == LINK_PROTOCOL && type == RESET_MSG) ||
		    (user == MSG_CRYPTO && type == KEY_DISTR_MSG) ||
		    time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) {
			if (__rx && __rx->key_master &&
			    !atomic_read(&__rx->peer_rx_active))
				goto encrypt;
			if (!__rx) {
				if (likely(!tx->legacy_user))
					goto encrypt;
				tipc_crypto_clone_msg(net, *skb, b, dst,
						      __dnode, SKB_GRACING);
			}
		}
	}

	/* Else, use the active key if any */
	if (likely(key.active)) {
		tx_key = key.active;
		goto encrypt;
	}

	goto exit;

encrypt:
	aead = tipc_aead_get(tx->aead[tx_key]);
	if (unlikely(!aead))
		goto exit;
	rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
	if (likely(rc > 0))
		rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);

exit:
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY)
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
		else if (rc == -EKEYREVOKED)
			this_cpu_inc(stats->stat[STAT_BADKEYS]);
		kfree_skb(*skb);
		*skb = NULL;
		break;
	}

	tipc_aead_put(aead);
	return rc;
}

/**
 * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
 * @net: struct net
 * @rx: RX crypto handle
 * @skb: input/output message skb pointer
 * @b: bearer where the message has been received
 *
 * If the decryption is successful, the decrypted skb is returned directly or
 * via the callback, the encryption header and auth tag will be trimmed out
 * before forwarding to tipc_rcv() via the tipc_crypto_rcv_complete().
 * Otherwise, the skb will be freed!
 * Note: RX key(s) can be re-aligned, or in case of no key suitable, TX
 * cluster key(s) can be taken for decryption (- recursive).
 *
 * Return:
 * * 0                   : the decryption has successfully completed
 * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
 * * -ENOKEY             : the decryption has failed due to no key
 * * -EBADMSG            : the decryption has failed due to bad message
 * * -ENOMEM             : the decryption has failed due to no memory
 * * < 0                 : the decryption has failed due to other reasons
 */
int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
		    struct sk_buff **skb, struct tipc_bearer *b)
{
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats;
	struct tipc_aead *aead = NULL;
	struct tipc_key key;
	int rc = -ENOKEY;
	u8 tx_key, n;

	tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;

	/* New peer?
	 * Let's try with TX key (i.e. cluster mode) & verify the skb first!
	 */
	if (unlikely(!rx || tx_key == KEY_MASTER))
		goto pick_tx;

	/* Pick RX key according to TX key if any */
	key = rx->key;
	if (tx_key == key.active || tx_key == key.pending ||
	    tx_key == key.passive)
		goto decrypt;

	/* Unknown key, let's try to align RX key(s) */
	if (tipc_crypto_key_try_align(rx, tx_key))
		goto decrypt;

pick_tx:
	/* No key suitable? Try to pick one from TX... */
	aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key);
	if (aead)
		goto decrypt;
	goto exit;

decrypt:
	rcu_read_lock();
	if (!aead)
		aead = tipc_aead_get(rx->aead[tx_key]);
	rc = tipc_aead_decrypt(net, aead, *skb, b);
	rcu_read_unlock();

exit:
	stats = ((rx) ?: tx)->stats;
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY) {
			kfree_skb(*skb);
			*skb = NULL;
			if (rx) {
				/* Mark rx->nokey only if we don't have a
				 * pending received session key, nor a newer
				 * one i.e. in the next slot.
				 */
				n = key_next(tx_key);
				rx->nokey = !(rx->skey ||
					      rcu_access_pointer(rx->aead[n]));
				pr_debug_ratelimited("%s: nokey %d, key %d/%x\n",
						     rx->name, rx->nokey,
						     tx_key, rx->key.keys);
				tipc_node_put(rx->node);
			}
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
			return rc;
		} else if (rc == -EBADMSG) {
			this_cpu_inc(stats->stat[STAT_BADMSGS]);
		}
		break;
	}

	tipc_crypto_rcv_complete(net, aead, b, skb, rc);
	return rc;
}

static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb);
	struct tipc_crypto *rx = aead->crypto;
	struct tipc_aead *tmp = NULL;
	struct tipc_ehdr *ehdr;
	struct tipc_node *n;

	/* Is this completed by TX? */
	if (unlikely(is_tx(aead->crypto))) {
		rx = skb_cb->tx_clone_ctx.rx;
		pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
			 (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
			 (*skb)->next, skb_cb->flags);
tipc_node_get_id_str(rx->node) : "-", err, aead, 1910 (*skb)->next, skb_cb->flags); 1911 pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n", 1912 skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last, 1913 aead->crypto->aead[1], aead->crypto->aead[2], 1914 aead->crypto->aead[3]); 1915 if (unlikely(err)) { 1916 if (err == -EBADMSG && (*skb)->next) 1917 tipc_rcv(net, (*skb)->next, b); 1918 goto free_skb; 1919 } 1920 1921 if (likely((*skb)->next)) { 1922 kfree_skb((*skb)->next); 1923 (*skb)->next = NULL; 1924 } 1925 ehdr = (struct tipc_ehdr *)(*skb)->data; 1926 if (!rx) { 1927 WARN_ON(ehdr->user != LINK_CONFIG); 1928 n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0, 1929 true); 1930 rx = tipc_node_crypto_rx(n); 1931 if (unlikely(!rx)) 1932 goto free_skb; 1933 } 1934 1935 /* Ignore cloning if it was TX master key */ 1936 if (ehdr->tx_key == KEY_MASTER) 1937 goto rcv; 1938 if (tipc_aead_clone(&tmp, aead) < 0) 1939 goto rcv; 1940 WARN_ON(!refcount_inc_not_zero(&tmp->refcnt)); 1941 if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) { 1942 tipc_aead_free(&tmp->rcu); 1943 goto rcv; 1944 } 1945 tipc_aead_put(aead); 1946 aead = tmp; 1947 } 1948 1949 if (unlikely(err)) { 1950 tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN); 1951 goto free_skb; 1952 } 1953 1954 /* Set the RX key's user */ 1955 tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1); 1956 1957 /* Mark this point, RX works */ 1958 rx->timer1 = jiffies; 1959 1960 rcv: 1961 /* Remove ehdr & auth. tag prior to tipc_rcv() */ 1962 ehdr = (struct tipc_ehdr *)(*skb)->data; 1963 1964 /* Mark this point, RX passive still works */ 1965 if (rx->key.passive && ehdr->tx_key == rx->key.passive) 1966 rx->timer2 = jiffies; 1967 1968 skb_reset_network_header(*skb); 1969 skb_pull(*skb, tipc_ehdr_size(ehdr)); 1970 pskb_trim(*skb, (*skb)->len - aead->authsize); 1971 1972 /* Validate TIPCv2 message */ 1973 if (unlikely(!tipc_msg_validate(skb))) { 1974 pr_err_ratelimited("Packet dropped after decryption!\n"); 1975 goto free_skb; 1976 } 1977 1978 /* Ok, everything's fine, try to synch own keys according to peers' */ 1979 tipc_crypto_key_synch(rx, *skb); 1980 1981 /* Mark skb decrypted */ 1982 skb_cb->decrypted = 1; 1983 1984 /* Clear clone cxt if any */ 1985 if (likely(!skb_cb->tx_clone_deferred)) 1986 goto exit; 1987 skb_cb->tx_clone_deferred = 0; 1988 memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); 1989 goto exit; 1990 1991 free_skb: 1992 kfree_skb(*skb); 1993 *skb = NULL; 1994 1995 exit: 1996 tipc_aead_put(aead); 1997 if (rx) 1998 tipc_node_put(rx->node); 1999 } 2000 2001 static void tipc_crypto_do_cmd(struct net *net, int cmd) 2002 { 2003 struct tipc_net *tn = tipc_net(net); 2004 struct tipc_crypto *tx = tn->crypto_tx, *rx; 2005 struct list_head *p; 2006 unsigned int stat; 2007 int i, j, cpu; 2008 char buf[200]; 2009 2010 /* Currently only one command is supported */ 2011 switch (cmd) { 2012 case 0xfff1: 2013 goto print_stats; 2014 default: 2015 return; 2016 } 2017 2018 print_stats: 2019 /* Print a header */ 2020 pr_info("\n=============== TIPC Crypto Statistics ===============\n\n"); 2021 2022 /* Print key status */ 2023 pr_info("Key status:\n"); 2024 pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net), 2025 tipc_crypto_key_dump(tx, buf)); 2026 2027 rcu_read_lock(); 2028 for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { 2029 rx = tipc_node_crypto_rx_by_list(p); 2030 pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node), 2031 tipc_crypto_key_dump(rx, buf)); 2032 } 2033 
	rcu_read_unlock();

	/* Print crypto statistics */
	for (i = 0, j = 0; i < MAX_STATS; i++)
		j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
	pr_info("Counter %s", buf);

	memset(buf, '-', 115);
	buf[115] = '\0';
	pr_info("%s\n", buf);

	j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net));
	for_each_possible_cpu(cpu) {
		for (i = 0; i < MAX_STATS; i++) {
			stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
			j += scnprintf(buf + j, 200 - j, "|%11d ", stat);
		}
		pr_info("%s", buf);
		j = scnprintf(buf, 200, "%12s", " ");
	}

	rcu_read_lock();
	for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
		rx = tipc_node_crypto_rx_by_list(p);
		j = scnprintf(buf, 200, "RX(%7.7s) ",
			      tipc_node_get_id_str(rx->node));
		for_each_possible_cpu(cpu) {
			for (i = 0; i < MAX_STATS; i++) {
				stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
				j += scnprintf(buf + j, 200 - j, "|%11d ",
					       stat);
			}
			pr_info("%s", buf);
			j = scnprintf(buf, 200, "%12s", " ");
		}
	}
	rcu_read_unlock();

	pr_info("\n======================== Done ========================\n");
}

static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
{
	struct tipc_key key = c->key;
	struct tipc_aead *aead;
	int k, i = 0;
	char *s;

	for (k = KEY_MIN; k <= KEY_MAX; k++) {
		if (k == KEY_MASTER) {
			if (is_rx(c))
				continue;
			if (time_before(jiffies,
					c->timer2 + TIPC_TX_GRACE_PERIOD))
				s = "ACT";
			else
				s = "PAS";
		} else {
			if (k == key.passive)
				s = "PAS";
			else if (k == key.active)
				s = "ACT";
			else if (k == key.pending)
				s = "PEN";
			else
				s = "-";
		}
		i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);

		rcu_read_lock();
		aead = rcu_dereference(c->aead[k]);
		if (aead)
			i += scnprintf(buf + i, 200 - i,
				       "{\"0x...%s\", \"%s\"}/%d:%d",
				       aead->hint,
				       (aead->mode == CLUSTER_KEY) ? "c" : "p",
				       atomic_read(&aead->users),
				       refcount_read(&aead->refcnt));
		rcu_read_unlock();
		i += scnprintf(buf + i, 200 - i, "\n");
	}

	if (is_rx(c))
		i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
			       atomic_read(&c->peer_rx_active));

	return buf;
}

static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf)
{
	struct tipc_key *key = &old;
	int k, i = 0;
	char *s;

	/* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */
again:
	i += scnprintf(buf + i, 32 - i, "[");
	for (k = KEY_1; k <= KEY_3; k++) {
		if (k == key->passive)
			s = "pas";
		else if (k == key->active)
			s = "act";
		else if (k == key->pending)
			s = "pen";
		else
			s = "-";
		i += scnprintf(buf + i, 32 - i,
			       (k != KEY_3) ? "%s " : "%s", s);
	}
	if (key != &new) {
		i += scnprintf(buf + i, 32 - i, "] -> ");
		key = &new;
		goto again;
	}
	i += scnprintf(buf + i, 32 - i, "]");
	return buf;
}

/**
 * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point
 * @net: the struct net
 * @skb: the receiving message buffer
 */
void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb)
{
	struct tipc_crypto *rx;
	struct tipc_msg *hdr;

	if (unlikely(skb_linearize(skb)))
		goto exit;

	hdr = buf_msg(skb);
	rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr));
	if (unlikely(!rx))
		goto exit;

	switch (msg_type(hdr)) {
	case KEY_DISTR_MSG:
		if (tipc_crypto_key_rcv(rx, hdr))
			goto exit;
		break;
	default:
		break;
	}

	tipc_node_put(rx->node);

exit:
	kfree_skb(skb);
}

/**
 * tipc_crypto_key_distr - Distribute a TX key
 * @tx: the TX crypto
 * @key: the key's index
 * @dest: the destination tipc node, = NULL if distributing to all nodes
 *
 * Return: 0 in case of success, otherwise < 0
 */
int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
			  struct tipc_node *dest)
{
	struct tipc_aead *aead;
	u32 dnode = tipc_node_get_addr(dest);
	int rc = -ENOKEY;

	if (!sysctl_tipc_key_exchange_enabled)
		return 0;

	if (key) {
		rcu_read_lock();
		aead = tipc_aead_get(tx->aead[key]);
		if (likely(aead)) {
			rc = tipc_crypto_key_xmit(tx->net, aead->key,
						  aead->gen, aead->mode,
						  dnode);
			tipc_aead_put(aead);
		}
		rcu_read_unlock();
	}

	return rc;
}

/**
 * tipc_crypto_key_xmit - Send a session key
 * @net: the struct net
 * @skey: the session key to be sent
 * @gen: the key's generation
 * @mode: the key's mode
 * @dnode: the destination node address, = 0 if broadcasting to all nodes
 *
 * The session key 'skey' is packed into a TIPC v2 'MSG_CRYPTO/KEY_DISTR_MSG'
 * as its data section, then transmitted over the unicast or broadcast link.
 *
 * Return: 0 in case of success, otherwise < 0
 */
static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
				u16 gen, u8 mode, u32 dnode)
{
	struct sk_buff_head pkts;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u16 size, cong_link_cnt;
	u8 *data;
	int rc;

	size = tipc_aead_key_size(skey);
	skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hdr = buf_msg(skb);
	tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG,
		      INT_H_SIZE, dnode);
	msg_set_size(hdr, INT_H_SIZE + size);
	msg_set_key_gen(hdr, gen);
	msg_set_key_mode(hdr, mode);

	data = msg_data(hdr);
	*((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen);
	memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME);
	memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key,
	       skey->keylen);

	__skb_queue_head_init(&pkts);
	__skb_queue_tail(&pkts, skb);
	if (dnode)
		rc = tipc_node_xmit(net, &pkts, dnode, 0);
	else
		rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt);

	return rc;
}

/**
 * tipc_crypto_key_rcv - Receive a session key
 * @rx: the RX crypto
 * @hdr: the TIPC v2 message incl. the received session key in its data
 *
 * This function retrieves the session key in the message from peer, then
 * schedules a RX work to attach the key to the corresponding RX crypto.
 *
 * Return: "true" if the key has been scheduled for attaching, otherwise
 * "false".
 */
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
{
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_aead_key *skey = NULL;
	u16 key_gen = msg_key_gen(hdr);
	u16 size = msg_data_sz(hdr);
	u8 *data = msg_data(hdr);
	unsigned int keylen;

	/* Verify the message data size is large enough to hold a session key */
	if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) {
		pr_debug("%s: message data size is too small\n", rx->name);
		goto exit;
	}

	keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));

	/* Verify the supplied size values */
	if (unlikely(size != keylen + sizeof(struct tipc_aead_key) ||
		     keylen > TIPC_AEAD_KEY_SIZE_MAX)) {
		pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
		goto exit;
	}

	spin_lock(&rx->lock);
	if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
		pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
		       rx->skey, key_gen, rx->key_gen);
		goto exit_unlock;
	}

	/* Allocate memory for the key */
	skey = kmalloc(size, GFP_ATOMIC);
	if (unlikely(!skey)) {
		pr_err("%s: unable to allocate memory for skey\n", rx->name);
		goto exit_unlock;
	}

	/* Copy key from msg data */
	skey->keylen = keylen;
	memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
	memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
	       skey->keylen);

	rx->key_gen = key_gen;
	rx->skey_mode = msg_key_mode(hdr);
	rx->skey = skey;
	rx->nokey = 0;
	mb(); /* for nokey flag */

exit_unlock:
	spin_unlock(&rx->lock);

exit:
	/* Schedule the key attaching on this crypto */
	if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
		return true;

	return false;
}

/**
 * tipc_crypto_work_rx - Scheduled RX works handler
 * @work: the struct RX work
 *
 * The function processes the previously scheduled work items, i.e.
 * distributing the TX key or attaching a received session key to the RX
 * crypto.
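 *
 * Note: either case may leave work outstanding (e.g. -EBUSY/-ENOMEM when
 * attaching the key); the work then re-queues itself with a short delay
 * rather than blocking, see the "resched" handling below.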
2348 */ 2349 static void tipc_crypto_work_rx(struct work_struct *work) 2350 { 2351 struct delayed_work *dwork = to_delayed_work(work); 2352 struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work); 2353 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; 2354 unsigned long delay = msecs_to_jiffies(5000); 2355 bool resched = false; 2356 u8 key; 2357 int rc; 2358 2359 /* Case 1: Distribute TX key to peer if scheduled */ 2360 if (atomic_cmpxchg(&rx->key_distr, 2361 KEY_DISTR_SCHED, 2362 KEY_DISTR_COMPL) == KEY_DISTR_SCHED) { 2363 /* Always pick the newest one for distributing */ 2364 key = tx->key.pending ?: tx->key.active; 2365 rc = tipc_crypto_key_distr(tx, key, rx->node); 2366 if (unlikely(rc)) 2367 pr_warn("%s: unable to distr key[%d] to %s, err %d\n", 2368 tx->name, key, tipc_node_get_id_str(rx->node), 2369 rc); 2370 2371 /* Sched for key_distr releasing */ 2372 resched = true; 2373 } else { 2374 atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0); 2375 } 2376 2377 /* Case 2: Attach a pending received session key from peer if any */ 2378 if (rx->skey) { 2379 rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false); 2380 if (unlikely(rc < 0)) 2381 pr_warn("%s: unable to attach received skey, err %d\n", 2382 rx->name, rc); 2383 switch (rc) { 2384 case -EBUSY: 2385 case -ENOMEM: 2386 /* Resched the key attaching */ 2387 resched = true; 2388 break; 2389 default: 2390 synchronize_rcu(); 2391 kfree(rx->skey); 2392 rx->skey = NULL; 2393 break; 2394 } 2395 } 2396 2397 if (resched && queue_delayed_work(tx->wq, &rx->work, delay)) 2398 return; 2399 2400 tipc_node_put(rx->node); 2401 } 2402 2403 /** 2404 * tipc_crypto_rekeying_sched - (Re)schedule rekeying w/o new interval 2405 * @tx: TX crypto 2406 * @changed: if the rekeying needs to be rescheduled with new interval 2407 * @new_intv: new rekeying interval (when "changed" = true) 2408 */ 2409 void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed, 2410 u32 new_intv) 2411 { 2412 unsigned long delay; 2413 bool now = false; 2414 2415 if (changed) { 2416 if (new_intv == TIPC_REKEYING_NOW) 2417 now = true; 2418 else 2419 tx->rekeying_intv = new_intv; 2420 cancel_delayed_work_sync(&tx->work); 2421 } 2422 2423 if (tx->rekeying_intv || now) { 2424 delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000; 2425 queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay)); 2426 } 2427 } 2428 2429 /** 2430 * tipc_crypto_work_tx - Scheduled TX works handler 2431 * @work: the struct TX work 2432 * 2433 * The function processes the previous scheduled work, i.e. key rekeying, by 2434 * generating a new session key based on current one, then attaching it to the 2435 * TX crypto and finally distributing it to peers. It also re-schedules the 2436 * rekeying if needed. 
2437 */ 2438 static void tipc_crypto_work_tx(struct work_struct *work) 2439 { 2440 struct delayed_work *dwork = to_delayed_work(work); 2441 struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work); 2442 struct tipc_aead_key *skey = NULL; 2443 struct tipc_key key = tx->key; 2444 struct tipc_aead *aead; 2445 int rc = -ENOMEM; 2446 2447 if (unlikely(key.pending)) 2448 goto resched; 2449 2450 /* Take current key as a template */ 2451 rcu_read_lock(); 2452 aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]); 2453 if (unlikely(!aead)) { 2454 rcu_read_unlock(); 2455 /* At least one key should exist for securing */ 2456 return; 2457 } 2458 2459 /* Lets duplicate it first */ 2460 skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC); 2461 rcu_read_unlock(); 2462 2463 /* Now, generate new key, initiate & distribute it */ 2464 if (likely(skey)) { 2465 rc = tipc_aead_key_generate(skey) ?: 2466 tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false); 2467 if (likely(rc > 0)) 2468 rc = tipc_crypto_key_distr(tx, rc, NULL); 2469 kfree_sensitive(skey); 2470 } 2471 2472 if (unlikely(rc)) 2473 pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc); 2474 2475 resched: 2476 /* Re-schedule rekeying if any */ 2477 tipc_crypto_rekeying_sched(tx, false, 0); 2478 } 2479