Lines Matching "tx" and "rx" (net/tipc/crypto.c)

1 // SPDX-License-Identifier: GPL-2.0
75 STAT_BADKEYS, /* tx only */
76 STAT_BADMSGS = STAT_BADKEYS, /* rx only */
94 * struct tipc_key - TIPC keys' status indicator
97 * +-----+-----+-----+-----+-----+-----+-----+-----+
99 * +-----+-----+-----+-----+-----+-----+-----+-----+
103 #define KEY_MASK ((1 << KEY_BITS) - 1)
109 passive:2, /* rx only */
113 passive:2, /* rx only */
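
The two bitfield fragments above (little- and big-endian layouts) pack the pending, active, and passive key-slot indices, 2 bits each per KEY_BITS/KEY_MASK, into a single byte. Below is a minimal userspace sketch of that packing, mirroring the shift arithmetic visible in tipc_crypto_key_set_state() further down; the helper name key_pack is illustrative, not from the source.

    #include <stdio.h>
    #include <stdint.h>

    #define KEY_BITS 2
    #define KEY_MASK ((1 << KEY_BITS) - 1)

    /* bits 0-1 = pending, bits 2-3 = active, bits 4-5 = passive */
    static uint8_t key_pack(uint8_t passive, uint8_t active, uint8_t pending)
    {
        return ((passive & KEY_MASK) << (KEY_BITS * 2)) |
               ((active & KEY_MASK) << KEY_BITS) |
               (pending & KEY_MASK);
    }

    int main(void)
    {
        uint8_t keys = key_pack(3, 1, 2);

        printf("passive=%u active=%u pending=%u\n",
               (keys >> (KEY_BITS * 2)) & KEY_MASK,
               (keys >> KEY_BITS) & KEY_MASK,
               keys & KEY_MASK);
        return 0;
    }
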
125 * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
135 * struct tipc_aead - TIPC AEAD key structure
136 * @tfm_entry: per-cpu pointer to one entry in TFM list
139 * @users: the number of the key users (TX/RX)
170 * struct tipc_crypto_stats - TIPC Crypto statistics
178 * struct tipc_crypto - TIPC TX/RX crypto structure
180 * @node: TIPC node (RX)
182 * @peer_rx_active: replicated peer RX active key index
183 * @key_gen: TX/RX key generation
187 * @wq: common workqueue on TX crypto
188 * @work: delayed work sched for TX/RX
193 * @sndnxt: the per-peer sndnxt (TX)
238 /* struct tipc_crypto_tx_ctx - TX context for callbacks */
245 /* struct tipc_crypto_rx_ctx - RX context for callbacks */
285 static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
286 static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
287 struct tipc_crypto *rx,
290 static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
305 static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
310 #define is_tx(crypto) (!(crypto)->node)
330  * tipc_aead_key_validate - Validate an AEAD user key
339 if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) { in tipc_aead_key_validate()
341 return -ENODEV; in tipc_aead_key_validate()
345 if (strcmp(ukey->alg_name, "gcm(aes)")) { in tipc_aead_key_validate()
347 return -ENOTSUPP; in tipc_aead_key_validate()
351 keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE; in tipc_aead_key_validate()
356 return -EKEYREJECTED; in tipc_aead_key_validate()
363 * tipc_aead_key_generate - Generate new session key
375 rc = crypto_rng_get_bytes(crypto_default_rng, skey->key, in tipc_aead_key_generate()
376 skey->keylen); in tipc_aead_key_generate()
389 if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt))) in tipc_aead_get()
398 if (aead && refcount_dec_and_test(&aead->refcnt)) in tipc_aead_put()
399 call_rcu(&aead->rcu, tipc_aead_free); in tipc_aead_put()
403 * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
411 if (aead->cloned) { in tipc_aead_free()
412 tipc_aead_put(aead->cloned); in tipc_aead_free()
414 head = *get_cpu_ptr(aead->tfm_entry); in tipc_aead_free()
415 put_cpu_ptr(aead->tfm_entry); in tipc_aead_free()
416 list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) { in tipc_aead_free()
417 crypto_free_aead(tfm_entry->tfm); in tipc_aead_free()
418 list_del(&tfm_entry->list); in tipc_aead_free()
422 crypto_free_aead(head->tfm); in tipc_aead_free()
423 list_del(&head->list); in tipc_aead_free()
426 free_percpu(aead->tfm_entry); in tipc_aead_free()
427 kfree_sensitive(aead->key); in tipc_aead_free()
439 users = atomic_read(&tmp->users); in tipc_aead_users()
452 atomic_add_unless(&tmp->users, 1, lim); in tipc_aead_users_inc()
463 atomic_add_unless(&rcu_dereference(aead)->users, -1, lim); in tipc_aead_users_dec()
476 cur = atomic_read(&tmp->users); in tipc_aead_users_set()
479 } while (atomic_cmpxchg(&tmp->users, cur, val) != cur); in tipc_aead_users_set()
485 * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
493 tfm_entry = get_cpu_ptr(aead->tfm_entry); in tipc_aead_tfm_next()
495 tfm = (*tfm_entry)->tfm; in tipc_aead_tfm_next()
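
tipc_aead_tfm_next() above advances a per-cpu cursor one step around a circular list of crypto transforms, so repeated calls spread work across several TFM instances. A userspace sketch of that round-robin rotation follows, with a single cursor standing in for the per-cpu pointer; the struct and field names are illustrative.

    #include <stdio.h>

    struct tfm_entry {
        int tfm_id;              /* stand-in for struct crypto_aead * */
        struct tfm_entry *next;  /* circular list */
    };

    /* Advance the cursor one step and return the entry it now points at */
    static struct tfm_entry *tfm_next(struct tfm_entry **cursor)
    {
        *cursor = (*cursor)->next;
        return *cursor;
    }

    int main(void)
    {
        struct tfm_entry c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct tfm_entry *cur = &a;
        int i;

        c.next = &a;             /* close the ring */
        for (i = 0; i < 5; i++)
            printf("using tfm %d\n", tfm_next(&cur)->tfm_id);
        return 0;
    }
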
502 * tipc_aead_init - Initiate TIPC AEAD
524 return -EEXIST; in tipc_aead_init()
529 return -ENOMEM; in tipc_aead_init()
531 /* The key consists of two parts: [AES-KEY][SALT] */ in tipc_aead_init()
532 keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE; in tipc_aead_init()
534 /* Allocate per-cpu TFM entry pointer */ in tipc_aead_init()
535 tmp->tfm_entry = alloc_percpu(struct tipc_tfm *); in tipc_aead_init()
536 if (!tmp->tfm_entry) { in tipc_aead_init()
538 return -ENOMEM; in tipc_aead_init()
543 tfm = crypto_alloc_aead(ukey->alg_name, 0, 0); in tipc_aead_init()
552 err = -ENOTSUPP; in tipc_aead_init()
557 err |= crypto_aead_setkey(tfm, ukey->key, keylen); in tipc_aead_init()
566 err = -ENOMEM; in tipc_aead_init()
569 INIT_LIST_HEAD(&tfm_entry->list); in tipc_aead_init()
570 tfm_entry->tfm = tfm; in tipc_aead_init()
576 *per_cpu_ptr(tmp->tfm_entry, cpu) = head; in tipc_aead_init()
579 list_add_tail(&tfm_entry->list, &head->list); in tipc_aead_init()
586 free_percpu(tmp->tfm_entry); in tipc_aead_init()
592 bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN, in tipc_aead_init()
596 tmp->mode = mode; in tipc_aead_init()
597 tmp->cloned = NULL; in tipc_aead_init()
598 tmp->authsize = TIPC_AES_GCM_TAG_SIZE; in tipc_aead_init()
599 tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL); in tipc_aead_init()
600 if (!tmp->key) { in tipc_aead_init()
601 tipc_aead_free(&tmp->rcu); in tipc_aead_init()
602 return -ENOMEM; in tipc_aead_init()
604 memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE); in tipc_aead_init()
605 atomic_set(&tmp->users, 0); in tipc_aead_init()
606 atomic64_set(&tmp->seqno, 0); in tipc_aead_init()
607 refcount_set(&tmp->refcnt, 1); in tipc_aead_init()
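
tipc_aead_init() above treats the user key blob as [AES-KEY][SALT]: the trailing TIPC_AES_GCM_SALT_SIZE bytes become the GCM salt, and the printable hint is the hex of the AES key's last bytes (via bin2hex()). The sketch below shows that split in userspace; the constants 4 and 5 are assumed values standing in for TIPC_AES_GCM_SALT_SIZE and TIPC_AEAD_HINT_LEN.

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define AES_GCM_SALT_SIZE 4   /* assumed TIPC_AES_GCM_SALT_SIZE */
    #define AEAD_HINT_LEN     5   /* assumed TIPC_AEAD_HINT_LEN */

    int main(void)
    {
        /* A user key blob is [AES-KEY][SALT]; split it as the code above does */
        uint8_t ukey[16 + AES_GCM_SALT_SIZE];
        unsigned int keylen = sizeof(ukey) - AES_GCM_SALT_SIZE;
        uint32_t salt;
        char hint[2 * AEAD_HINT_LEN + 1];
        unsigned int i;

        for (i = 0; i < sizeof(ukey); i++)
            ukey[i] = (uint8_t)i;                 /* dummy key material */

        memcpy(&salt, ukey + keylen, AES_GCM_SALT_SIZE);

        /* bin2hex() stand-in: hint = hex of the AES key's last bytes */
        for (i = 0; i < AEAD_HINT_LEN; i++)
            sprintf(hint + 2 * i, "%02x", ukey[keylen - AEAD_HINT_LEN + i]);

        printf("keylen=%u salt=0x%08x hint=%s\n", keylen, (unsigned)salt, hint);
        return 0;
    }
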
614 * tipc_aead_clone - Clone a TIPC AEAD key
623 * Note: this must be done in cluster-key mode only!
632 return -ENOKEY; in tipc_aead_clone()
634 if (src->mode != CLUSTER_KEY) in tipc_aead_clone()
635 return -EINVAL; in tipc_aead_clone()
638 return -EEXIST; in tipc_aead_clone()
642 return -ENOMEM; in tipc_aead_clone()
644 aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC); in tipc_aead_clone()
645 if (unlikely(!aead->tfm_entry)) { in tipc_aead_clone()
647 return -ENOMEM; in tipc_aead_clone()
651 *per_cpu_ptr(aead->tfm_entry, cpu) = in tipc_aead_clone()
652 *per_cpu_ptr(src->tfm_entry, cpu); in tipc_aead_clone()
655 memcpy(aead->hint, src->hint, sizeof(src->hint)); in tipc_aead_clone()
656 aead->mode = src->mode; in tipc_aead_clone()
657 aead->salt = src->salt; in tipc_aead_clone()
658 aead->authsize = src->authsize; in tipc_aead_clone()
659 atomic_set(&aead->users, 0); in tipc_aead_clone()
660 atomic64_set(&aead->seqno, 0); in tipc_aead_clone()
661 refcount_set(&aead->refcnt, 1); in tipc_aead_clone()
663 WARN_ON(!refcount_inc_not_zero(&src->refcnt)); in tipc_aead_clone()
664 aead->cloned = src; in tipc_aead_clone()
671 * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
699 len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1); in tipc_aead_mem_alloc()
720 * tipc_aead_encrypt - Encrypt a message
729 * * -EINPROGRESS/-EBUSY : if a callback will be performed
748 /* Make sure message len at least 4-byte aligned */ in tipc_aead_encrypt()
749 len = ALIGN(skb->len, 4); in tipc_aead_encrypt()
750 tailen = len - skb->len + aead->authsize; in tipc_aead_encrypt()
760 pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n", in tipc_aead_encrypt()
766 pr_err("TX: skb_cow_data() returned %d\n", nsg); in tipc_aead_encrypt()
775 return -ENOMEM; in tipc_aead_encrypt()
776 TIPC_SKB_CB(skb)->crypto_ctx = ctx; in tipc_aead_encrypt()
780 rc = skb_to_sgvec(skb, sg, 0, skb->len); in tipc_aead_encrypt()
782 pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg); in tipc_aead_encrypt()
787 * In case we're in cluster-key mode, SALT is varied by xor-ing with in tipc_aead_encrypt()
791 ehdr = (struct tipc_ehdr *)skb->data; in tipc_aead_encrypt()
792 salt = aead->salt; in tipc_aead_encrypt()
793 if (aead->mode == CLUSTER_KEY) in tipc_aead_encrypt()
794 salt ^= __be32_to_cpu(ehdr->addr); in tipc_aead_encrypt()
798 memcpy(iv + 4, (u8 *)&ehdr->seqno, 8); in tipc_aead_encrypt()
804 aead_request_set_crypt(req, sg, sg, len - ehsz, iv); in tipc_aead_encrypt()
810 tx_ctx->aead = aead; in tipc_aead_encrypt()
811 tx_ctx->bearer = b; in tipc_aead_encrypt()
812 memcpy(&tx_ctx->dst, dst, sizeof(*dst)); in tipc_aead_encrypt()
816 rc = -ENODEV; in tipc_aead_encrypt()
822 if (rc == -EINPROGRESS || rc == -EBUSY) in tipc_aead_encrypt()
829 TIPC_SKB_CB(skb)->crypto_ctx = NULL; in tipc_aead_encrypt()
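
The encrypt path above derives the AES-GCM nonce from the key's 4-byte salt, XORed with the source node address in cluster-key mode (so the same shared key yields different nonces on different source nodes), followed by the 8-byte seqno from the encryption header. A minimal sketch of that 12-byte IV construction, with all values illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t salt = 0xa1b2c3d4;   /* from the AEAD key */
        uint32_t addr = 0x01001001;   /* ehdr->addr, cluster mode only */
        uint64_t seqno = 42;          /* ehdr->seqno (host-endian here; the
                                       * kernel copies the on-wire bytes) */
        uint8_t iv[12];
        int cluster_key = 1;
        int i;

        if (cluster_key)
            salt ^= addr;             /* vary the salt per source node */
        memcpy(iv, &salt, 4);
        memcpy(iv + 4, &seqno, 8);

        for (i = 0; i < 12; i++)
            printf("%02x", iv[i]);
        printf("\n");
        return 0;
    }
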
836 struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; in tipc_aead_encrypt_done()
837 struct tipc_bearer *b = tx_ctx->bearer; in tipc_aead_encrypt_done()
838 struct tipc_aead *aead = tx_ctx->aead; in tipc_aead_encrypt_done()
839 struct tipc_crypto *tx = aead->crypto; in tipc_aead_encrypt_done() local
840 struct net *net = tx->net; in tipc_aead_encrypt_done()
844 this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]); in tipc_aead_encrypt_done()
846 if (likely(test_bit(0, &b->up))) in tipc_aead_encrypt_done()
847 b->media->send_msg(net, skb, b, &tx_ctx->dst); in tipc_aead_encrypt_done()
852 case -EINPROGRESS: in tipc_aead_encrypt_done()
855 this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]); in tipc_aead_encrypt_done()
866 * tipc_aead_decrypt - Decrypt an encrypted message
874 * * -EINPROGRESS/-EBUSY : if a callback will be performed
892 return -ENOKEY; in tipc_aead_decrypt()
896 pr_err("RX: skb_cow_data() returned %d\n", nsg); in tipc_aead_decrypt()
904 return -ENOMEM; in tipc_aead_decrypt()
905 TIPC_SKB_CB(skb)->crypto_ctx = ctx; in tipc_aead_decrypt()
909 rc = skb_to_sgvec(skb, sg, 0, skb->len); in tipc_aead_decrypt()
911 pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg); in tipc_aead_decrypt()
916 ehdr = (struct tipc_ehdr *)skb->data; in tipc_aead_decrypt()
917 salt = aead->salt; in tipc_aead_decrypt()
918 if (aead->mode == CLUSTER_KEY) in tipc_aead_decrypt()
919 salt ^= __be32_to_cpu(ehdr->addr); in tipc_aead_decrypt()
920 else if (ehdr->destined) in tipc_aead_decrypt()
923 memcpy(iv + 4, (u8 *)&ehdr->seqno, 8); in tipc_aead_decrypt()
929 aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv); in tipc_aead_decrypt()
935 rx_ctx->aead = aead; in tipc_aead_decrypt()
936 rx_ctx->bearer = b; in tipc_aead_decrypt()
940 rc = -ENODEV; in tipc_aead_decrypt()
946 if (rc == -EINPROGRESS || rc == -EBUSY) in tipc_aead_decrypt()
953 TIPC_SKB_CB(skb)->crypto_ctx = NULL; in tipc_aead_decrypt()
960 struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; in tipc_aead_decrypt_done()
961 struct tipc_bearer *b = rx_ctx->bearer; in tipc_aead_decrypt_done()
962 struct tipc_aead *aead = rx_ctx->aead; in tipc_aead_decrypt_done()
963 struct tipc_crypto_stats __percpu *stats = aead->crypto->stats; in tipc_aead_decrypt_done()
964 struct net *net = aead->crypto->net; in tipc_aead_decrypt_done()
968 this_cpu_inc(stats->stat[STAT_ASYNC_OK]); in tipc_aead_decrypt_done()
970 case -EINPROGRESS: in tipc_aead_decrypt_done()
973 this_cpu_inc(stats->stat[STAT_ASYNC_NOK]); in tipc_aead_decrypt_done()
980 if (likely(test_bit(0, &b->up))) in tipc_aead_decrypt_done()
991 return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE; in tipc_ehdr_size()
995 * tipc_ehdr_validate - Validate an encryption message
1008 ehdr = (struct tipc_ehdr *)skb->data; in tipc_ehdr_validate()
1009 if (unlikely(ehdr->version != TIPC_EVERSION)) in tipc_ehdr_validate()
1014 if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE)) in tipc_ehdr_validate()
1021 * tipc_ehdr_build - Build TIPC encryption message header
1023 * @aead: TX AEAD key to be used for the message encryption
1026 * @__rx: RX crypto handle if dest is "known"
1047 * cluster key mode, otherwise it's better for a per-peer seqno! in tipc_ehdr_build()
1049 if (!__rx || aead->mode == CLUSTER_KEY) in tipc_ehdr_build()
1050 seqno = atomic64_inc_return(&aead->seqno); in tipc_ehdr_build()
1052 seqno = atomic64_inc_return(&__rx->sndnxt); in tipc_ehdr_build()
1058 /* Word 1-2 */ in tipc_ehdr_build()
1059 ehdr->seqno = cpu_to_be64(seqno); in tipc_ehdr_build()
1061 /* Words 0, 3- */ in tipc_ehdr_build()
1062 ehdr->version = TIPC_EVERSION; in tipc_ehdr_build()
1063 ehdr->user = 0; in tipc_ehdr_build()
1064 ehdr->keepalive = 0; in tipc_ehdr_build()
1065 ehdr->tx_key = tx_key; in tipc_ehdr_build()
1066 ehdr->destined = (__rx) ? 1 : 0; in tipc_ehdr_build()
1067 ehdr->rx_key_active = (__rx) ? __rx->key.active : 0; in tipc_ehdr_build()
1068 ehdr->rx_nokey = (__rx) ? __rx->nokey : 0; in tipc_ehdr_build()
1069 ehdr->master_key = aead->crypto->key_master; in tipc_ehdr_build()
1070 ehdr->reserved_1 = 0; in tipc_ehdr_build()
1071 ehdr->reserved_2 = 0; in tipc_ehdr_build()
1075 ehdr->user = LINK_CONFIG; in tipc_ehdr_build()
1076 memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN); in tipc_ehdr_build()
1080 ehdr->user = LINK_PROTOCOL; in tipc_ehdr_build()
1081 ehdr->keepalive = msg_is_keepalive(hdr); in tipc_ehdr_build()
1083 ehdr->addr = hdr->hdr[3]; in tipc_ehdr_build()
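
tipc_ehdr_build() above chooses the seqno source: a cluster key, or a message to an unknown destination, draws from the key's own counter, while a per-node key to a known peer uses that peer's sndnxt counter. A userspace sketch of the selection with C11 atomics; the function name next_seqno is illustrative.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdatomic.h>

    enum key_mode { CLUSTER_KEY, PER_NODE_KEY };

    static uint64_t next_seqno(enum key_mode mode, _Atomic uint64_t *key_seqno,
                               _Atomic uint64_t *peer_sndnxt /* NULL if unknown */)
    {
        /* atomic64_inc_return() equivalent: fetch-add, then report new value */
        if (!peer_sndnxt || mode == CLUSTER_KEY)
            return atomic_fetch_add(key_seqno, 1) + 1;
        return atomic_fetch_add(peer_sndnxt, 1) + 1;
    }

    int main(void)
    {
        _Atomic uint64_t key_seqno = 0, sndnxt = 0;

        printf("%llu\n", (unsigned long long)
               next_seqno(CLUSTER_KEY, &key_seqno, &sndnxt));  /* key counter */
        printf("%llu\n", (unsigned long long)
               next_seqno(PER_NODE_KEY, &key_seqno, &sndnxt)); /* peer counter */
        return 0;
    }
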
1095 struct tipc_key old = c->key; in tipc_crypto_key_set_state()
1098 c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) | in tipc_crypto_key_set_state()
1102 pr_debug("%s: key changing %s ::%pS\n", c->name, in tipc_crypto_key_set_state()
1103 tipc_key_change_dump(old, c->key, buf), in tipc_crypto_key_set_state()
1108 * tipc_crypto_key_init - Initiate a new user / AEAD key
1132 tipc_aead_free(&aead->rcu); in tipc_crypto_key_init()
1139 * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
1145 * Return: new key id in case of success, otherwise: -EBUSY
1152 int rc = -EBUSY; in tipc_crypto_key_attach()
1155 spin_lock_bh(&c->lock); in tipc_crypto_key_attach()
1156 key = c->key; in tipc_crypto_key_attach()
1164 if (tipc_aead_users(c->aead[key.pending]) > 0) in tipc_crypto_key_attach()
1186 aead->crypto = c; in tipc_crypto_key_attach()
1187 aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen; in tipc_crypto_key_attach()
1188 tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock); in tipc_crypto_key_attach()
1189 if (likely(c->key.keys != key.keys)) in tipc_crypto_key_attach()
1192 c->working = 1; in tipc_crypto_key_attach()
1193 c->nokey = 0; in tipc_crypto_key_attach()
1194 c->key_master |= master_key; in tipc_crypto_key_attach()
1198 spin_unlock_bh(&c->lock); in tipc_crypto_key_attach()
1204 struct tipc_crypto *tx, *rx; in tipc_crypto_key_flush() local
1207 spin_lock_bh(&c->lock); in tipc_crypto_key_flush()
1210 rx = c; in tipc_crypto_key_flush()
1211 tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_key_flush()
1212 if (cancel_delayed_work(&rx->work)) { in tipc_crypto_key_flush()
1213 kfree(rx->skey); in tipc_crypto_key_flush()
1214 rx->skey = NULL; in tipc_crypto_key_flush()
1215 atomic_xchg(&rx->key_distr, 0); in tipc_crypto_key_flush()
1216 tipc_node_put(rx->node); in tipc_crypto_key_flush()
1218 /* RX stopping => decrease TX key users if any */ in tipc_crypto_key_flush()
1219 k = atomic_xchg(&rx->peer_rx_active, 0); in tipc_crypto_key_flush()
1221 tipc_aead_users_dec(tx->aead[k], 0); in tipc_crypto_key_flush()
1222 /* Mark the point TX key users changed */ in tipc_crypto_key_flush()
1223 tx->timer1 = jiffies; in tipc_crypto_key_flush()
1227 c->flags = 0; in tipc_crypto_key_flush()
1230 tipc_crypto_key_detach(c->aead[k], &c->lock); in tipc_crypto_key_flush()
1231 atomic64_set(&c->sndnxt, 0); in tipc_crypto_key_flush()
1232 spin_unlock_bh(&c->lock); in tipc_crypto_key_flush()
1236 * tipc_crypto_key_try_align - Align RX keys if possible
1237 * @rx: RX crypto handle
1238 * @new_pending: new pending slot if aligned (= TX key from peer)
1248 static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending) in tipc_crypto_key_try_align() argument
1256 spin_lock(&rx->lock); in tipc_crypto_key_try_align()
1257 key = rx->key; in tipc_crypto_key_try_align()
1266 if (tipc_aead_users(rx->aead[key.pending]) > 0) in tipc_crypto_key_try_align()
1270 tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock); in tipc_crypto_key_try_align()
1271 if (!refcount_dec_if_one(&tmp1->refcnt)) in tipc_crypto_key_try_align()
1273 rcu_assign_pointer(rx->aead[key.pending], NULL); in tipc_crypto_key_try_align()
1277 tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock)); in tipc_crypto_key_try_align()
1278 x = (key.passive - key.pending + new_pending) % KEY_MAX; in tipc_crypto_key_try_align()
1282 /* Re-allocate the key(s) */ in tipc_crypto_key_try_align()
1283 tipc_crypto_key_set_state(rx, new_passive, 0, new_pending); in tipc_crypto_key_try_align()
1284 rcu_assign_pointer(rx->aead[new_pending], tmp1); in tipc_crypto_key_try_align()
1286 rcu_assign_pointer(rx->aead[new_passive], tmp2); in tipc_crypto_key_try_align()
1287 refcount_set(&tmp1->refcnt, 1); in tipc_crypto_key_try_align()
1289 pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending, in tipc_crypto_key_try_align()
1293 spin_unlock(&rx->lock); in tipc_crypto_key_try_align()
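
The re-alignment above moves the pending key into the slot index the peer actually transmits with, and shifts the passive key so it keeps its relative distance, modulo the three usable slots. A sketch of just that arithmetic, assuming KEY_MAX is 3 with slot 0 meaning "no key"; the normalization of a non-positive remainder is an assumption consistent with the modular step shown above.

    #include <stdio.h>

    #define KEY_MAX 3   /* slot indices run 1..3; 0 means "no key" */

    int main(void)
    {
        int pending = 2, passive = 3;
        int new_pending = 1;   /* the TX key slot the peer uses */
        int x, new_passive;

        x = (passive - pending + new_pending) % KEY_MAX;
        new_passive = (x <= 0) ? x + KEY_MAX : x;  /* assumed: back into 1..3 */

        printf("pending %d -> %d, passive %d -> %d\n",
               pending, new_pending, passive, new_passive);
        return 0;
    }
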
1298 * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
1299 * @tx: TX crypto handle
1300 * @rx: RX crypto handle (can be NULL)
1302 * @tx_key: peer TX key id
1304  * This function looks up the existing TX keys and picks one that is suitable in tipc_crypto_key_pick_tx()
1308 * Return: the TX AEAD key handle in case of success, otherwise NULL
1310 static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx, in tipc_crypto_key_pick_tx() argument
1311 struct tipc_crypto *rx, in tipc_crypto_key_pick_tx() argument
1317 struct tipc_key key = tx->key; in tipc_crypto_key_pick_tx()
1321 if (!skb_cb->tx_clone_deferred) { in tipc_crypto_key_pick_tx()
1322 skb_cb->tx_clone_deferred = 1; in tipc_crypto_key_pick_tx()
1323 memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); in tipc_crypto_key_pick_tx()
1326 skb_cb->tx_clone_ctx.rx = rx; in tipc_crypto_key_pick_tx()
1327 if (++skb_cb->tx_clone_ctx.recurs > 2) in tipc_crypto_key_pick_tx()
1330 /* Pick one TX key */ in tipc_crypto_key_pick_tx()
1331 spin_lock(&tx->lock); in tipc_crypto_key_pick_tx()
1333 aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock); in tipc_crypto_key_pick_tx()
1341 aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock); in tipc_crypto_key_pick_tx()
1344 if (aead->mode != CLUSTER_KEY || in tipc_crypto_key_pick_tx()
1345 aead == skb_cb->tx_clone_ctx.last) { in tipc_crypto_key_pick_tx()
1350 skb_cb->tx_clone_ctx.last = aead; in tipc_crypto_key_pick_tx()
1351 WARN_ON(skb->next); in tipc_crypto_key_pick_tx()
1352 skb->next = skb_clone(skb, GFP_ATOMIC); in tipc_crypto_key_pick_tx()
1353 if (unlikely(!skb->next)) in tipc_crypto_key_pick_tx()
1360 WARN_ON(!refcount_inc_not_zero(&aead->refcnt)); in tipc_crypto_key_pick_tx()
1361 spin_unlock(&tx->lock); in tipc_crypto_key_pick_tx()
1368 * @rx: RX crypto handle
1371  * This function updates the peer-node-related data when the peer RX active key in tipc_crypto_key_synch()
1372  * has changed, so the TX keys' user counts on this node are increased and in tipc_crypto_key_synch()
1379 * The "per-peer" sndnxt is also reset when the peer key has switched.
1381 static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb) in tipc_crypto_key_synch() argument
1384 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_key_synch() local
1386 u32 self = tipc_own_addr(rx->net); in tipc_crypto_key_synch()
1390 /* Update RX 'key_master' flag according to peer, also mark "legacy" if in tipc_crypto_key_synch()
1393 rx->key_master = ehdr->master_key; in tipc_crypto_key_synch()
1394 if (!rx->key_master) in tipc_crypto_key_synch()
1395 tx->legacy_user = 1; in tipc_crypto_key_synch()
1398 if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self) in tipc_crypto_key_synch()
1402 if (ehdr->rx_nokey) { in tipc_crypto_key_synch()
1404 tx->timer2 = jiffies; in tipc_crypto_key_synch()
1406 if (tx->key.keys && in tipc_crypto_key_synch()
1407 !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) { in tipc_crypto_key_synch()
1411 if (queue_delayed_work(tx->wq, &rx->work, delay)) in tipc_crypto_key_synch()
1412 tipc_node_get(rx->node); in tipc_crypto_key_synch()
1416 atomic_xchg(&rx->key_distr, 0); in tipc_crypto_key_synch()
1419 /* Case 2: Peer RX active key has changed, let's update own TX users */ in tipc_crypto_key_synch()
1420 cur = atomic_read(&rx->peer_rx_active); in tipc_crypto_key_synch()
1421 new = ehdr->rx_key_active; in tipc_crypto_key_synch()
1422 if (tx->key.keys && in tipc_crypto_key_synch()
1424 atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) { in tipc_crypto_key_synch()
1426 tipc_aead_users_inc(tx->aead[new], INT_MAX); in tipc_crypto_key_synch()
1428 tipc_aead_users_dec(tx->aead[cur], 0); in tipc_crypto_key_synch()
1430 atomic64_set(&rx->sndnxt, 0); in tipc_crypto_key_synch()
1431 /* Mark the point TX key users changed */ in tipc_crypto_key_synch()
1432 tx->timer1 = jiffies; in tipc_crypto_key_synch()
1434 pr_debug("%s: key users changed %d-- %d++, peer %s\n", in tipc_crypto_key_synch()
1435 tx->name, cur, new, rx->name); in tipc_crypto_key_synch()
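
Case 2 above mirrors the peer's RX active key: a single compare-and-swap on peer_rx_active guarantees that only one path bumps the new TX key's user count and drops the old one's. A userspace sketch with C11 atomics; counts and slot numbers are illustrative.

    #include <stdio.h>
    #include <stdatomic.h>

    int main(void)
    {
        _Atomic int peer_rx_active = 1;  /* currently mirrored peer key slot */
        int users[4] = { 0, 5, 0, 0 };   /* per-slot TX key user counts */
        int cur = atomic_load(&peer_rx_active);
        int new = 2;                     /* ehdr->rx_key_active from the peer */

        if (new != cur &&
            atomic_compare_exchange_strong(&peer_rx_active, &cur, new)) {
            users[new]++;                /* tipc_aead_users_inc() */
            if (cur)
                users[cur]--;            /* tipc_aead_users_dec() */
        }
        printf("users: key1=%d key2=%d\n", users[1], users[2]);
        return 0;
    }
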
1441 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; in tipc_crypto_key_revoke() local
1444 spin_lock_bh(&tx->lock); in tipc_crypto_key_revoke()
1445 key = tx->key; in tipc_crypto_key_revoke()
1449 tipc_crypto_key_set_state(tx, key.passive, 0, key.pending); in tipc_crypto_key_revoke()
1450 tipc_crypto_key_detach(tx->aead[key.active], &tx->lock); in tipc_crypto_key_revoke()
1451 spin_unlock_bh(&tx->lock); in tipc_crypto_key_revoke()
1453 pr_warn("%s: key is revoked\n", tx->name); in tipc_crypto_key_revoke()
1454 return -EKEYREVOKED; in tipc_crypto_key_revoke()
1463 return -EEXIST; in tipc_crypto_start()
1468 return -ENOMEM; in tipc_crypto_start()
1470 /* Allocate workqueue on TX */ in tipc_crypto_start()
1472 c->wq = alloc_ordered_workqueue("tipc_crypto", 0); in tipc_crypto_start()
1473 if (!c->wq) { in tipc_crypto_start()
1475 return -ENOMEM; in tipc_crypto_start()
1480 c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC); in tipc_crypto_start()
1481 if (!c->stats) { in tipc_crypto_start()
1482 if (c->wq) in tipc_crypto_start()
1483 destroy_workqueue(c->wq); in tipc_crypto_start()
1485 return -ENOMEM; in tipc_crypto_start()
1488 c->flags = 0; in tipc_crypto_start()
1489 c->net = net; in tipc_crypto_start()
1490 c->node = node; in tipc_crypto_start()
1491 get_random_bytes(&c->key_gen, 2); in tipc_crypto_start()
1493 atomic_set(&c->key_distr, 0); in tipc_crypto_start()
1494 atomic_set(&c->peer_rx_active, 0); in tipc_crypto_start()
1495 atomic64_set(&c->sndnxt, 0); in tipc_crypto_start()
1496 c->timer1 = jiffies; in tipc_crypto_start()
1497 c->timer2 = jiffies; in tipc_crypto_start()
1498 c->rekeying_intv = TIPC_REKEYING_INTV_DEF; in tipc_crypto_start()
1499 spin_lock_init(&c->lock); in tipc_crypto_start()
1500 scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX", in tipc_crypto_start()
1501 (is_rx(c)) ? tipc_node_get_id_str(c->node) : in tipc_crypto_start()
1502 tipc_own_id_string(c->net)); in tipc_crypto_start()
1505 INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx); in tipc_crypto_start()
1507 INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx); in tipc_crypto_start()
1523 c->rekeying_intv = 0; in tipc_crypto_stop()
1524 cancel_delayed_work_sync(&c->work); in tipc_crypto_stop()
1525 destroy_workqueue(c->wq); in tipc_crypto_stop()
1531 tipc_aead_put(rcu_dereference(c->aead[k])); in tipc_crypto_stop()
1533 pr_debug("%s: has been stopped\n", c->name); in tipc_crypto_stop()
1536 free_percpu(c->stats); in tipc_crypto_stop()
1542 void tipc_crypto_timeout(struct tipc_crypto *rx) in tipc_crypto_timeout() argument
1544 struct tipc_net *tn = tipc_net(rx->net); in tipc_crypto_timeout()
1545 struct tipc_crypto *tx = tn->crypto_tx; in tipc_crypto_timeout() local
1549 /* TX pending: taking all users & stable -> active */ in tipc_crypto_timeout()
1550 spin_lock(&tx->lock); in tipc_crypto_timeout()
1551 key = tx->key; in tipc_crypto_timeout()
1552 if (key.active && tipc_aead_users(tx->aead[key.active]) > 0) in tipc_crypto_timeout()
1554 if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0) in tipc_crypto_timeout()
1556 if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME)) in tipc_crypto_timeout()
1559 tipc_crypto_key_set_state(tx, key.passive, key.pending, 0); in tipc_crypto_timeout()
1561 tipc_crypto_key_detach(tx->aead[key.active], &tx->lock); in tipc_crypto_timeout()
1562 this_cpu_inc(tx->stats->stat[STAT_SWITCHES]); in tipc_crypto_timeout()
1563 pr_info("%s: key[%d] is activated\n", tx->name, key.pending); in tipc_crypto_timeout()
1566 spin_unlock(&tx->lock); in tipc_crypto_timeout()
1568 /* RX pending: having user -> active */ in tipc_crypto_timeout()
1569 spin_lock(&rx->lock); in tipc_crypto_timeout()
1570 key = rx->key; in tipc_crypto_timeout()
1571 if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0) in tipc_crypto_timeout()
1577 rx->timer2 = jiffies; in tipc_crypto_timeout()
1578 tipc_crypto_key_set_state(rx, key.passive, key.active, 0); in tipc_crypto_timeout()
1579 this_cpu_inc(rx->stats->stat[STAT_SWITCHES]); in tipc_crypto_timeout()
1580 pr_info("%s: key[%d] is activated\n", rx->name, key.pending); in tipc_crypto_timeout()
1584 /* RX pending: not working -> remove */ in tipc_crypto_timeout()
1585 if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10) in tipc_crypto_timeout()
1588 tipc_crypto_key_set_state(rx, key.passive, key.active, 0); in tipc_crypto_timeout()
1589 tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock); in tipc_crypto_timeout()
1590 pr_debug("%s: key[%d] is removed\n", rx->name, key.pending); in tipc_crypto_timeout()
1594 /* RX active: timed out or no user -> pending */ in tipc_crypto_timeout()
1597 if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) && in tipc_crypto_timeout()
1598 tipc_aead_users(rx->aead[key.active]) > 0) in tipc_crypto_timeout()
1605 rx->timer2 = jiffies; in tipc_crypto_timeout()
1606 tipc_crypto_key_set_state(rx, key.passive, 0, key.pending); in tipc_crypto_timeout()
1607 tipc_aead_users_set(rx->aead[key.pending], 0); in tipc_crypto_timeout()
1608 pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active); in tipc_crypto_timeout()
1612 /* RX passive: outdated or not working -> free */ in tipc_crypto_timeout()
1615 if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) && in tipc_crypto_timeout()
1616 tipc_aead_users(rx->aead[key.passive]) > -10) in tipc_crypto_timeout()
1619 tipc_crypto_key_set_state(rx, 0, key.active, key.pending); in tipc_crypto_timeout()
1620 tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock); in tipc_crypto_timeout()
1621 pr_debug("%s: key[%d] is freed\n", rx->name, key.passive); in tipc_crypto_timeout()
1624 spin_unlock(&rx->lock); in tipc_crypto_timeout()
1629 if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) in tipc_crypto_timeout()
1630 tx->legacy_user = 0; in tipc_crypto_timeout()
1638 tipc_crypto_do_cmd(rx->net, cmd); in tipc_crypto_timeout()
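
tipc_crypto_timeout() above ages the RX key slots: a pending key with users is activated, one driven to -10 users by repeated decryption failures is removed, an active key that timed out or lost all users is deactivated, and an outdated passive key is freed. Below is a condensed restatement of those verdicts as a sketch; the thresholds follow the comparisons visible above, and the kernel's jiffies checks against TIPC_RX_ACTIVE_LIM/TIPC_RX_PASSIVE_LIM are summarized here as a timed_out flag.

    #include <stdio.h>
    #include <string.h>

    static const char *rx_key_verdict(const char *slot, int users, int timed_out)
    {
        if (!strcmp(slot, "pending")) {
            if (users > 0)
                return "activate";
            return (users <= -10) ? "remove" : "keep";
        }
        if (!strcmp(slot, "active"))
            return (timed_out || users <= 0) ? "deactivate" : "keep";
        return (timed_out || users <= -10) ? "free" : "keep"; /* passive */
    }

    int main(void)
    {
        printf("pending, 3 users: %s\n", rx_key_verdict("pending", 3, 0));
        printf("active, timed out: %s\n", rx_key_verdict("active", 1, 1));
        printf("passive, -12 users: %s\n", rx_key_verdict("passive", -12, 0));
        return 0;
    }
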
1650 TIPC_SKB_CB(skb)->xmit_type = type; in tipc_crypto_clone_msg()
1653 b->media->send_msg(net, skb, b, dst); in tipc_crypto_clone_msg()
1658 * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
1674 * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
1675  * -ENOKEY : the encryption has failed due to no key
1676 * * -EKEYREVOKED : the encryption has failed due to key revoked
1677 * * -ENOMEM : the encryption has failed due to no memory
1685 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; in tipc_crypto_xmit() local
1686 struct tipc_crypto_stats __percpu *stats = tx->stats; in tipc_crypto_xmit()
1688 struct tipc_key key = tx->key; in tipc_crypto_xmit()
1692 int rc = -ENOKEY; in tipc_crypto_xmit()
1696 if (!tx->working) in tipc_crypto_xmit()
1702 if (!tx->key_master && !key.active) in tipc_crypto_xmit()
1704 if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key) in tipc_crypto_xmit()
1706 if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) { in tipc_crypto_xmit()
1707 pr_debug("%s: probing for key[%d]\n", tx->name, in tipc_crypto_xmit()
1717 if (tx->key_master) { in tipc_crypto_xmit()
1721 if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) { in tipc_crypto_xmit()
1722 pr_debug("%s: gracing for msg (%d %d)\n", tx->name, in tipc_crypto_xmit()
1729 time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) { in tipc_crypto_xmit()
1730 if (__rx && __rx->key_master && in tipc_crypto_xmit()
1731 !atomic_read(&__rx->peer_rx_active)) in tipc_crypto_xmit()
1734 if (likely(!tx->legacy_user)) in tipc_crypto_xmit()
1751 aead = tipc_aead_get(tx->aead[tx_key]); in tipc_crypto_xmit()
1761 this_cpu_inc(stats->stat[STAT_OK]); in tipc_crypto_xmit()
1763 case -EINPROGRESS: in tipc_crypto_xmit()
1764 case -EBUSY: in tipc_crypto_xmit()
1765 this_cpu_inc(stats->stat[STAT_ASYNC]); in tipc_crypto_xmit()
1769 this_cpu_inc(stats->stat[STAT_NOK]); in tipc_crypto_xmit()
1770 if (rc == -ENOKEY) in tipc_crypto_xmit()
1771 this_cpu_inc(stats->stat[STAT_NOKEYS]); in tipc_crypto_xmit()
1772 else if (rc == -EKEYREVOKED) in tipc_crypto_xmit()
1773 this_cpu_inc(stats->stat[STAT_BADKEYS]); in tipc_crypto_xmit()
1784 * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
1786 * @rx: RX crypto handle
1794  * Note: RX key(s) can be re-aligned, or, if no key is suitable, TX
1795  * cluster key(s) can be used for decryption (recursively).
1799 * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
1800 * * -ENOKEY : the decryption has failed due to no key
1801 * * -EBADMSG : the decryption has failed due to bad message
1802 * * -ENOMEM : the decryption has failed due to no memory
1805 int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx, in tipc_crypto_rcv() argument
1808 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; in tipc_crypto_rcv() local
1812 int rc = -ENOKEY; in tipc_crypto_rcv()
1815 tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key; in tipc_crypto_rcv()
1818 * Let's try with TX key (i.e. cluster mode) & verify the skb first! in tipc_crypto_rcv()
1820 if (unlikely(!rx || tx_key == KEY_MASTER)) in tipc_crypto_rcv()
1823 /* Pick RX key according to TX key if any */ in tipc_crypto_rcv()
1824 key = rx->key; in tipc_crypto_rcv()
1829 /* Unknown key, let's try to align RX key(s) */ in tipc_crypto_rcv()
1830 if (tipc_crypto_key_try_align(rx, tx_key)) in tipc_crypto_rcv()
1834 /* No key suitable? Try to pick one from TX... */ in tipc_crypto_rcv()
1835 aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key); in tipc_crypto_rcv()
1843 aead = tipc_aead_get(rx->aead[tx_key]); in tipc_crypto_rcv()
1848 stats = ((rx) ?: tx)->stats; in tipc_crypto_rcv()
1851 this_cpu_inc(stats->stat[STAT_OK]); in tipc_crypto_rcv()
1853 case -EINPROGRESS: in tipc_crypto_rcv()
1854 case -EBUSY: in tipc_crypto_rcv()
1855 this_cpu_inc(stats->stat[STAT_ASYNC]); in tipc_crypto_rcv()
1859 this_cpu_inc(stats->stat[STAT_NOK]); in tipc_crypto_rcv()
1860 if (rc == -ENOKEY) { in tipc_crypto_rcv()
1863 if (rx) { in tipc_crypto_rcv()
1864  			/* Mark rx->nokey only if we don't have a in tipc_crypto_rcv()
1869 rx->nokey = !(rx->skey || in tipc_crypto_rcv()
1870 rcu_access_pointer(rx->aead[n])); in tipc_crypto_rcv()
1872 rx->name, rx->nokey, in tipc_crypto_rcv()
1873 tx_key, rx->key.keys); in tipc_crypto_rcv()
1874 tipc_node_put(rx->node); in tipc_crypto_rcv()
1876 this_cpu_inc(stats->stat[STAT_NOKEYS]); in tipc_crypto_rcv()
1878 } else if (rc == -EBADMSG) { in tipc_crypto_rcv()
1879 this_cpu_inc(stats->stat[STAT_BADMSGS]); in tipc_crypto_rcv()
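
tipc_crypto_rcv() above picks the decryption key in a fixed order: the TX side handles the master key and the no-RX-handle case, then comes the RX slot named by the peer's tx_key, then a slot re-alignment, and finally a borrowed TX cluster key (the recursive fallback noted in the doc comment). A sketch of that decision order; the helper name pick_rx_key is illustrative, and KEY_MASTER is assumed to be slot 0, outside the 1..3 ring.

    #include <stdio.h>
    #include <stdbool.h>

    #define KEY_MASTER 0   /* assumed master-key slot index */

    static const char *pick_rx_key(bool have_rx, int tx_key,
                                   bool slot_filled, bool can_align)
    {
        if (!have_rx || tx_key == KEY_MASTER)
            return "TX master/cluster key";
        if (slot_filled)
            return "RX key in matching slot";
        if (can_align)
            return "align RX slots, then retry";
        return "pick a TX cluster key";   /* recursive fallback */
    }

    int main(void)
    {
        printf("%s\n", pick_rx_key(false, 2, false, false));
        printf("%s\n", pick_rx_key(true, 2, true, false));
        printf("%s\n", pick_rx_key(true, 3, false, true));
        return 0;
    }
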
1893 struct tipc_crypto *rx = aead->crypto; in tipc_crypto_rcv_complete() local
1898 /* Is this completed by TX? */ in tipc_crypto_rcv_complete()
1899 if (unlikely(is_tx(aead->crypto))) { in tipc_crypto_rcv_complete()
1900 rx = skb_cb->tx_clone_ctx.rx; in tipc_crypto_rcv_complete()
1901 pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n", in tipc_crypto_rcv_complete()
1902 (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead, in tipc_crypto_rcv_complete()
1903 (*skb)->next, skb_cb->flags); in tipc_crypto_rcv_complete()
1904 pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n", in tipc_crypto_rcv_complete()
1905 skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last, in tipc_crypto_rcv_complete()
1906 aead->crypto->aead[1], aead->crypto->aead[2], in tipc_crypto_rcv_complete()
1907 aead->crypto->aead[3]); in tipc_crypto_rcv_complete()
1909 if (err == -EBADMSG && (*skb)->next) in tipc_crypto_rcv_complete()
1910 tipc_rcv(net, (*skb)->next, b); in tipc_crypto_rcv_complete()
1914 if (likely((*skb)->next)) { in tipc_crypto_rcv_complete()
1915 kfree_skb((*skb)->next); in tipc_crypto_rcv_complete()
1916 (*skb)->next = NULL; in tipc_crypto_rcv_complete()
1918 ehdr = (struct tipc_ehdr *)(*skb)->data; in tipc_crypto_rcv_complete()
1919 if (!rx) { in tipc_crypto_rcv_complete()
1920 WARN_ON(ehdr->user != LINK_CONFIG); in tipc_crypto_rcv_complete()
1921 n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0, in tipc_crypto_rcv_complete()
1923 rx = tipc_node_crypto_rx(n); in tipc_crypto_rcv_complete()
1924 if (unlikely(!rx)) in tipc_crypto_rcv_complete()
1928 /* Ignore cloning if it was TX master key */ in tipc_crypto_rcv_complete()
1929 if (ehdr->tx_key == KEY_MASTER) in tipc_crypto_rcv_complete()
1933 WARN_ON(!refcount_inc_not_zero(&tmp->refcnt)); in tipc_crypto_rcv_complete()
1934 if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) { in tipc_crypto_rcv_complete()
1935 tipc_aead_free(&tmp->rcu); in tipc_crypto_rcv_complete()
1947 /* Set the RX key's user */ in tipc_crypto_rcv_complete()
1950 /* Mark this point, RX works */ in tipc_crypto_rcv_complete()
1951 rx->timer1 = jiffies; in tipc_crypto_rcv_complete()
1955 ehdr = (struct tipc_ehdr *)(*skb)->data; in tipc_crypto_rcv_complete()
1957 /* Mark this point, RX passive still works */ in tipc_crypto_rcv_complete()
1958 if (rx->key.passive && ehdr->tx_key == rx->key.passive) in tipc_crypto_rcv_complete()
1959 rx->timer2 = jiffies; in tipc_crypto_rcv_complete()
1963 if (pskb_trim(*skb, (*skb)->len - aead->authsize)) in tipc_crypto_rcv_complete()
1973 tipc_crypto_key_synch(rx, *skb); in tipc_crypto_rcv_complete()
1975 /* Re-fetch skb cb as skb might be changed in tipc_msg_validate */ in tipc_crypto_rcv_complete()
1979 skb_cb->decrypted = 1; in tipc_crypto_rcv_complete()
1982 if (likely(!skb_cb->tx_clone_deferred)) in tipc_crypto_rcv_complete()
1984 skb_cb->tx_clone_deferred = 0; in tipc_crypto_rcv_complete()
1985 memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); in tipc_crypto_rcv_complete()
1994 if (rx) in tipc_crypto_rcv_complete()
1995 tipc_node_put(rx->node); in tipc_crypto_rcv_complete()
2001 struct tipc_crypto *tx = tn->crypto_tx, *rx; in tipc_crypto_do_cmd() local
2021 pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net), in tipc_crypto_do_cmd()
2022 tipc_crypto_key_dump(tx, buf)); in tipc_crypto_do_cmd()
2025 for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { in tipc_crypto_do_cmd()
2026 rx = tipc_node_crypto_rx_by_list(p); in tipc_crypto_do_cmd()
2027 pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node), in tipc_crypto_do_cmd()
2028 tipc_crypto_key_dump(rx, buf)); in tipc_crypto_do_cmd()
2034 j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]); in tipc_crypto_do_cmd()
2037 memset(buf, '-', 115); in tipc_crypto_do_cmd()
2041 j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net)); in tipc_crypto_do_cmd()
2044 stat = per_cpu_ptr(tx->stats, cpu)->stat[i]; in tipc_crypto_do_cmd()
2045 j += scnprintf(buf + j, 200 - j, "|%11d ", stat); in tipc_crypto_do_cmd()
2052 for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { in tipc_crypto_do_cmd()
2053 rx = tipc_node_crypto_rx_by_list(p); in tipc_crypto_do_cmd()
2054 j = scnprintf(buf, 200, "RX(%7.7s) ", in tipc_crypto_do_cmd()
2055 tipc_node_get_id_str(rx->node)); in tipc_crypto_do_cmd()
2058 stat = per_cpu_ptr(rx->stats, cpu)->stat[i]; in tipc_crypto_do_cmd()
2059 j += scnprintf(buf + j, 200 - j, "|%11d ", in tipc_crypto_do_cmd()
2073 struct tipc_key key = c->key; in tipc_crypto_key_dump()
2083 c->timer2 + TIPC_TX_GRACE_PERIOD)) in tipc_crypto_key_dump()
2095 s = "-"; in tipc_crypto_key_dump()
2097 i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s); in tipc_crypto_key_dump()
2100 aead = rcu_dereference(c->aead[k]); in tipc_crypto_key_dump()
2102 i += scnprintf(buf + i, 200 - i, in tipc_crypto_key_dump()
2104 aead->hint, in tipc_crypto_key_dump()
2105 (aead->mode == CLUSTER_KEY) ? "c" : "p", in tipc_crypto_key_dump()
2106 atomic_read(&aead->users), in tipc_crypto_key_dump()
2107 refcount_read(&aead->refcnt)); in tipc_crypto_key_dump()
2109 i += scnprintf(buf + i, 200 - i, "\n"); in tipc_crypto_key_dump()
2113 i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n", in tipc_crypto_key_dump()
2114 atomic_read(&c->peer_rx_active)); in tipc_crypto_key_dump()
2126 /* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */ in tipc_key_change_dump()
2128 i += scnprintf(buf + i, 32 - i, "["); in tipc_key_change_dump()
2130 if (k == key->passive) in tipc_key_change_dump()
2132 else if (k == key->active) in tipc_key_change_dump()
2134 else if (k == key->pending) in tipc_key_change_dump()
2137 s = "-"; in tipc_key_change_dump()
2138 i += scnprintf(buf + i, 32 - i, in tipc_key_change_dump()
2142 i += scnprintf(buf + i, 32 - i, "] -> "); in tipc_key_change_dump()
2146 i += scnprintf(buf + i, 32 - i, "]"); in tipc_key_change_dump()
2151 * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point
2157 struct tipc_crypto *rx; in tipc_crypto_msg_rcv() local
2164 rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr)); in tipc_crypto_msg_rcv()
2165 if (unlikely(!rx)) in tipc_crypto_msg_rcv()
2170 if (tipc_crypto_key_rcv(rx, hdr)) in tipc_crypto_msg_rcv()
2177 tipc_node_put(rx->node); in tipc_crypto_msg_rcv()
2184 * tipc_crypto_key_distr - Distribute a TX key
2185 * @tx: the TX crypto
2191 int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key, in tipc_crypto_key_distr() argument
2196 int rc = -ENOKEY; in tipc_crypto_key_distr()
2203 aead = tipc_aead_get(tx->aead[key]); in tipc_crypto_key_distr()
2205 rc = tipc_crypto_key_xmit(tx->net, aead->key, in tipc_crypto_key_distr()
2206 aead->gen, aead->mode, in tipc_crypto_key_distr()
2217 * tipc_crypto_key_xmit - Send a session key
2225  * as its data section, then transmitted through the uc/bc link.
2242 return -ENOMEM; in tipc_crypto_key_xmit()
2252 *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen); in tipc_crypto_key_xmit()
2253 memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME); in tipc_crypto_key_xmit()
2254 memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key, in tipc_crypto_key_xmit()
2255 skey->keylen); in tipc_crypto_key_xmit()
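
tipc_crypto_key_xmit() above lays the session key into the MSG_CRYPTO data section as [alg_name][keylen as __be32][key bytes]; tipc_crypto_key_rcv() below parses the same layout. A userspace sketch of the packing; the 32-byte value for TIPC_AEAD_ALG_NAME is an assumption.

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <arpa/inet.h>   /* htonl */

    #define ALG_NAME_LEN 32  /* assumed TIPC_AEAD_ALG_NAME */

    int main(void)
    {
        uint8_t key[16] = { 0xaa };  /* dummy session key material */
        uint8_t data[ALG_NAME_LEN + sizeof(uint32_t) + sizeof(key)];
        uint32_t be_len = htonl(sizeof(key));

        /* [alg_name (32 bytes)][keylen (__be32)][key (keylen bytes)] */
        memset(data, 0, sizeof(data));
        memcpy(data, "gcm(aes)", strlen("gcm(aes)"));
        memcpy(data + ALG_NAME_LEN, &be_len, sizeof(be_len));
        memcpy(data + ALG_NAME_LEN + sizeof(be_len), key, sizeof(key));

        printf("data section: %zu bytes\n", sizeof(data));
        return 0;
    }
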
2268 * tipc_crypto_key_rcv - Receive a session key
2269 * @rx: the RX crypto
2273  * schedules an RX work item to attach the key to the corresponding RX crypto.
2278 static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr) in tipc_crypto_key_rcv() argument
2280 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_key_rcv() local
2289 pr_debug("%s: message data size is too small\n", rx->name); in tipc_crypto_key_rcv()
2298 pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name); in tipc_crypto_key_rcv()
2302 spin_lock(&rx->lock); in tipc_crypto_key_rcv()
2303 if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) { in tipc_crypto_key_rcv()
2304 pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name, in tipc_crypto_key_rcv()
2305 rx->skey, key_gen, rx->key_gen); in tipc_crypto_key_rcv()
2312 pr_err("%s: unable to allocate memory for skey\n", rx->name); in tipc_crypto_key_rcv()
2317 skey->keylen = keylen; in tipc_crypto_key_rcv()
2318 memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME); in tipc_crypto_key_rcv()
2319 memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32), in tipc_crypto_key_rcv()
2320 skey->keylen); in tipc_crypto_key_rcv()
2322 rx->key_gen = key_gen; in tipc_crypto_key_rcv()
2323 rx->skey_mode = msg_key_mode(hdr); in tipc_crypto_key_rcv()
2324 rx->skey = skey; in tipc_crypto_key_rcv()
2325 rx->nokey = 0; in tipc_crypto_key_rcv()
2329 spin_unlock(&rx->lock); in tipc_crypto_key_rcv()
2333 if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0))) in tipc_crypto_key_rcv()
2340 * tipc_crypto_work_rx - Scheduled RX works handler
2341 * @work: the struct RX work
2343  * The function processes the previously scheduled work, i.e. distributing the TX key
2344  * or attaching a received session key to the RX crypto.
2349 struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work); in tipc_crypto_work_rx() local
2350 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_work_rx() local
2356 /* Case 1: Distribute TX key to peer if scheduled */ in tipc_crypto_work_rx()
2357 if (atomic_cmpxchg(&rx->key_distr, in tipc_crypto_work_rx()
2361 key = tx->key.pending ?: tx->key.active; in tipc_crypto_work_rx()
2362 rc = tipc_crypto_key_distr(tx, key, rx->node); in tipc_crypto_work_rx()
2365 tx->name, key, tipc_node_get_id_str(rx->node), in tipc_crypto_work_rx()
2371 atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0); in tipc_crypto_work_rx()
2375 if (rx->skey) { in tipc_crypto_work_rx()
2376 rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false); in tipc_crypto_work_rx()
2379 rx->name, rc); in tipc_crypto_work_rx()
2381 case -EBUSY: in tipc_crypto_work_rx()
2382 case -ENOMEM: in tipc_crypto_work_rx()
2388 kfree(rx->skey); in tipc_crypto_work_rx()
2389 rx->skey = NULL; in tipc_crypto_work_rx()
2394 if (resched && queue_delayed_work(tx->wq, &rx->work, delay)) in tipc_crypto_work_rx()
2397 tipc_node_put(rx->node); in tipc_crypto_work_rx()
2401 * tipc_crypto_rekeying_sched - (Re)schedule rekeying w/o new interval
2402 * @tx: TX crypto
2406 void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed, in tipc_crypto_rekeying_sched() argument
2416 tx->rekeying_intv = new_intv; in tipc_crypto_rekeying_sched()
2417 cancel_delayed_work_sync(&tx->work); in tipc_crypto_rekeying_sched()
2420 if (tx->rekeying_intv || now) { in tipc_crypto_rekeying_sched()
2421 delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000; in tipc_crypto_rekeying_sched()
2422 queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay)); in tipc_crypto_rekeying_sched()
2427 * tipc_crypto_work_tx - Scheduled TX works handler
2428 * @work: the struct TX work
2432 * TX crypto and finally distributing it to peers. It also re-schedules the
2438 struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work); in tipc_crypto_work_tx() local
2440 struct tipc_key key = tx->key; in tipc_crypto_work_tx()
2442 int rc = -ENOMEM; in tipc_crypto_work_tx()
2449 aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]); in tipc_crypto_work_tx()
2457 skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC); in tipc_crypto_work_tx()
2463 tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false); in tipc_crypto_work_tx()
2465 rc = tipc_crypto_key_distr(tx, rc, NULL); in tipc_crypto_work_tx()
2470 pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc); in tipc_crypto_work_tx()
2473 /* Re-schedule rekeying if any */ in tipc_crypto_work_tx()
2474 tipc_crypto_rekeying_sched(tx, false, 0); in tipc_crypto_work_tx()