Lines Matching +full:rx +full:- +full:tx in net/tipc/crypto.c
1 // SPDX-License-Identifier: GPL-2.0
75 STAT_BADKEYS, /* tx only */
76 STAT_BADMSGS = STAT_BADKEYS, /* rx only */
94 * struct tipc_key - TIPC keys' status indicator
97 * +-----+-----+-----+-----+-----+-----+-----+-----+
99 * +-----+-----+-----+-----+-----+-----+-----+-----+
103 #define KEY_MASK ((1 << KEY_BITS) - 1)
109 passive:2, /* rx only */
113 passive:2, /* rx only */
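
The three 2-bit slots pack into a single byte ("keys" in the union), which is what lets the whole key state be copied and compared in one step elsewhere in the listing (e.g. line 1198). A minimal unpacking sketch, assuming KEY_BITS == 2 to match the bitfield widths above; the helper name is hypothetical:

	/* n: 0 = pending, 1 = active, 2 = passive */
	static inline u8 tipc_key_slot(u8 keys, int n)
	{
		return (keys >> (KEY_BITS * n)) & KEY_MASK;
	}
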
125 * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
135 * struct tipc_aead - TIPC AEAD key structure
136 * @tfm_entry: per-cpu pointer to one entry in TFM list
139 * @users: the number of key users (TX/RX)
170 * struct tipc_crypto_stats - TIPC Crypto statistics
178 * struct tipc_crypto - TIPC TX/RX crypto structure
180 * @node: TIPC node (RX)
182 * @peer_rx_active: replicated peer RX active key index
183 * @key_gen: TX/RX key generation
187 * @wq: common workqueue on TX crypto
188 * @work: delayed work scheduled for TX/RX
193 * @sndnxt: the per-peer sndnxt (TX)
238 /* struct tipc_crypto_tx_ctx - TX context for callbacks */
245 /* struct tipc_crypto_rx_ctx - RX context for callbacks */
285 static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
286 static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
287 struct tipc_crypto *rx,
290 static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
305 static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
310 #define is_tx(crypto) (!(crypto)->node)
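
The counterpart predicate used further down (lines 1509-1510) is presumably just the negation; a one-line sketch:

	#define is_rx(crypto) (!is_tx(crypto))
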
330 * tipc_aead_key_validate - Validate an AEAD user key
339 if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) { in tipc_aead_key_validate()
341 return -ENODEV; in tipc_aead_key_validate()
345 if (strcmp(ukey->alg_name, "gcm(aes)")) { in tipc_aead_key_validate()
347 return -ENOTSUPP; in tipc_aead_key_validate()
351 keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE; in tipc_aead_key_validate()
356 return -EKEYREJECTED; in tipc_aead_key_validate()
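
Read together, these checks pin down the accepted user-key format: the algorithm must be "gcm(aes)", and the blob is a raw AES key followed by a salt of TIPC_AES_GCM_SALT_SIZE bytes. A hedged restatement of the size rule with literal AES-GCM key sizes (the literals are assumptions, not copied from the file):

	/* sketch: ukey->keylen = AES key + salt; only AES-128/192/256 passes */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
	if (keylen != 16 && keylen != 24 && keylen != 32)
		return -EKEYREJECTED;
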
363 * tipc_aead_key_generate - Generate new session key
375 rc = crypto_rng_get_bytes(crypto_default_rng, skey->key, in tipc_aead_key_generate()
376 skey->keylen); in tipc_aead_key_generate()
389 if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt))) in tipc_aead_get()
398 if (aead && refcount_dec_and_test(&aead->refcnt)) in tipc_aead_put()
399 call_rcu(&aead->rcu, tipc_aead_free); in tipc_aead_put()
403 * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
411 if (aead->cloned) { in tipc_aead_free()
412 tipc_aead_put(aead->cloned); in tipc_aead_free()
414 head = *get_cpu_ptr(aead->tfm_entry); in tipc_aead_free()
415 put_cpu_ptr(aead->tfm_entry); in tipc_aead_free()
416 list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) { in tipc_aead_free()
417 crypto_free_aead(tfm_entry->tfm); in tipc_aead_free()
418 list_del(&tfm_entry->list); in tipc_aead_free()
422 crypto_free_aead(head->tfm); in tipc_aead_free()
423 list_del(&head->list); in tipc_aead_free()
426 free_percpu(aead->tfm_entry); in tipc_aead_free()
427 kfree_sensitive(aead->key); in tipc_aead_free()
439 users = atomic_read(&tmp->users); in tipc_aead_users()
452 atomic_add_unless(&tmp->users, 1, lim); in tipc_aead_users_inc()
463 atomic_add_unless(&rcu_dereference(aead)->users, -1, lim); in tipc_aead_users_dec()
476 cur = atomic_read(&tmp->users); in tipc_aead_users_set()
479 } while (atomic_cmpxchg(&tmp->users, cur, val) != cur); in tipc_aead_users_set()
485 * tipc_aead_tfm_next - Move TFM entry to the next one in the list and return it
493 tfm_entry = get_cpu_ptr(aead->tfm_entry); in tipc_aead_tfm_next()
495 tfm = (*tfm_entry)->tfm; in tipc_aead_tfm_next()
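
Only fragments of tipc_aead_tfm_next() match above (lines 493, 495); a hedged reconstruction of the full round-robin step, where each CPU advances its own cursor around the circular TFM list so concurrent users don't contend on one TFM:

	static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
	{
		struct tipc_tfm **tfm_entry;
		struct crypto_aead *tfm;

		tfm_entry = get_cpu_ptr(aead->tfm_entry);
		*tfm_entry = list_next_entry(*tfm_entry, list);	/* advance cursor */
		tfm = (*tfm_entry)->tfm;
		put_cpu_ptr(aead->tfm_entry);
		return tfm;
	}
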
502 * tipc_aead_init - Initiate TIPC AEAD
524 return -EEXIST; in tipc_aead_init()
529 return -ENOMEM; in tipc_aead_init()
531 /* The key consists of two parts: [AES-KEY][SALT] */ in tipc_aead_init()
532 keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE; in tipc_aead_init()
534 /* Allocate per-cpu TFM entry pointer */ in tipc_aead_init()
535 tmp->tfm_entry = alloc_percpu(struct tipc_tfm *); in tipc_aead_init()
536 if (!tmp->tfm_entry) { in tipc_aead_init()
538 return -ENOMEM; in tipc_aead_init()
543 tfm = crypto_alloc_aead(ukey->alg_name, 0, 0); in tipc_aead_init()
552 err = -ENOTSUPP; in tipc_aead_init()
557 err |= crypto_aead_setkey(tfm, ukey->key, keylen); in tipc_aead_init()
566 err = -ENOMEM; in tipc_aead_init()
569 INIT_LIST_HEAD(&tfm_entry->list); in tipc_aead_init()
570 tfm_entry->tfm = tfm; in tipc_aead_init()
576 *per_cpu_ptr(tmp->tfm_entry, cpu) = head; in tipc_aead_init()
579 list_add_tail(&tfm_entry->list, &head->list); in tipc_aead_init()
586 free_percpu(tmp->tfm_entry); in tipc_aead_init()
592 bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN, in tipc_aead_init()
596 tmp->mode = mode; in tipc_aead_init()
597 tmp->cloned = NULL; in tipc_aead_init()
598 tmp->authsize = TIPC_AES_GCM_TAG_SIZE; in tipc_aead_init()
599 tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL); in tipc_aead_init()
600 if (!tmp->key) { in tipc_aead_init()
601 tipc_aead_free(&tmp->rcu); in tipc_aead_init()
602 return -ENOMEM; in tipc_aead_init()
604 memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE); in tipc_aead_init()
605 atomic_set(&tmp->users, 0); in tipc_aead_init()
606 atomic64_set(&tmp->seqno, 0); in tipc_aead_init()
607 refcount_set(&tmp->refcnt, 1); in tipc_aead_init()
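
End to end, tipc_aead_init() consumes a user key shaped "[AES-KEY][SALT]": the tail TIPC_AEAD_HINT_LEN bytes of the AES part become the printable hint, and the trailing salt seeds the GCM nonce. A hedged sketch of constructing such a key (the helper is hypothetical; a 16-byte AES-128 key is only an example size):

	static struct tipc_aead_key *tipc_demo_ukey_build(void)
	{
		u8 raw[16 + TIPC_AES_GCM_SALT_SIZE];	/* [AES-128 key][salt] */
		struct tipc_aead_key *ukey;

		ukey = kzalloc(sizeof(*ukey) + sizeof(raw), GFP_KERNEL);
		if (!ukey)
			return NULL;
		get_random_bytes(raw, sizeof(raw));
		strscpy(ukey->alg_name, "gcm(aes)", sizeof(ukey->alg_name));
		ukey->keylen = sizeof(raw);
		memcpy(ukey->key, raw, sizeof(raw));
		memzero_explicit(raw, sizeof(raw));
		return ukey;
	}
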
614 * tipc_aead_clone - Clone a TIPC AEAD key
623 * Note: this must be done in cluster-key mode only!
632 return -ENOKEY; in tipc_aead_clone()
634 if (src->mode != CLUSTER_KEY) in tipc_aead_clone()
635 return -EINVAL; in tipc_aead_clone()
638 return -EEXIST; in tipc_aead_clone()
642 return -ENOMEM; in tipc_aead_clone()
644 aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC); in tipc_aead_clone()
645 if (unlikely(!aead->tfm_entry)) { in tipc_aead_clone()
647 return -ENOMEM; in tipc_aead_clone()
651 *per_cpu_ptr(aead->tfm_entry, cpu) = in tipc_aead_clone()
652 *per_cpu_ptr(src->tfm_entry, cpu); in tipc_aead_clone()
655 memcpy(aead->hint, src->hint, sizeof(src->hint)); in tipc_aead_clone()
656 aead->mode = src->mode; in tipc_aead_clone()
657 aead->salt = src->salt; in tipc_aead_clone()
658 aead->authsize = src->authsize; in tipc_aead_clone()
659 atomic_set(&aead->users, 0); in tipc_aead_clone()
660 atomic64_set(&aead->seqno, 0); in tipc_aead_clone()
661 refcount_set(&aead->refcnt, 1); in tipc_aead_clone()
663 WARN_ON(!refcount_inc_not_zero(&src->refcnt)); in tipc_aead_clone()
664 aead->cloned = src; in tipc_aead_clone()
671 * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
699 len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1); in tipc_aead_mem_alloc()
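
The arithmetic above sizes a single buffer holding everything one crypto operation needs, so encrypt/decrypt pays for one kmalloc instead of several. A rough layout note (a reconstruction; the exact region order and padding follow the alignment terms in the function itself):

	/*
	 * mem: [ crypto ctx ][ IV ][ struct aead_request + reqsize ][ sg[nsg] ]
	 *      with each region padded up to its required alignment
	 */
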
720 * tipc_aead_encrypt - Encrypt a message
729 * * -EINPROGRESS/-EBUSY : if a callback will be performed
748 /* Make sure the message len is 4-byte aligned */ in tipc_aead_encrypt()
749 len = ALIGN(skb->len, 4); in tipc_aead_encrypt()
750 tailen = len - skb->len + aead->authsize; in tipc_aead_encrypt()
760 pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n", in tipc_aead_encrypt()
766 pr_err("TX: skb_cow_data() returned %d\n", nsg); in tipc_aead_encrypt()
775 return -ENOMEM; in tipc_aead_encrypt()
776 TIPC_SKB_CB(skb)->crypto_ctx = ctx; in tipc_aead_encrypt()
780 rc = skb_to_sgvec(skb, sg, 0, skb->len); in tipc_aead_encrypt()
782 pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg); in tipc_aead_encrypt()
787 * In case we're in cluster-key mode, SALT is varied by xor-ing with in tipc_aead_encrypt()
791 ehdr = (struct tipc_ehdr *)skb->data; in tipc_aead_encrypt()
792 salt = aead->salt; in tipc_aead_encrypt()
793 if (aead->mode == CLUSTER_KEY) in tipc_aead_encrypt()
794 salt ^= __be32_to_cpu(ehdr->addr); in tipc_aead_encrypt()
798 memcpy(iv + 4, (u8 *)&ehdr->seqno, 8); in tipc_aead_encrypt()
804 aead_request_set_crypt(req, sg, sg, len - ehsz, iv); in tipc_aead_encrypt()
810 tx_ctx->aead = aead; in tipc_aead_encrypt()
811 tx_ctx->bearer = b; in tipc_aead_encrypt()
812 memcpy(&tx_ctx->dst, dst, sizeof(*dst)); in tipc_aead_encrypt()
816 rc = -ENODEV; in tipc_aead_encrypt()
821 if (!maybe_get_net(aead->crypto->net)) { in tipc_aead_encrypt()
823 rc = -ENODEV; in tipc_aead_encrypt()
829 if (rc == -EINPROGRESS || rc == -EBUSY) in tipc_aead_encrypt()
833 put_net(aead->crypto->net); in tipc_aead_encrypt()
837 TIPC_SKB_CB(skb)->crypto_ctx = NULL; in tipc_aead_encrypt()
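
Collecting the fragments above: the 12-byte GCM nonce is the 4-byte salt followed by the 8-byte sequence number, and in cluster-key mode the salt is first XORed with the destination address so different peers never reuse a (key, nonce) pair. A hedged sketch:

	u8 iv[12];				/* assumed GCM IV size: 4 + 8 */
	u32 salt = aead->salt;

	if (aead->mode == CLUSTER_KEY)
		salt ^= __be32_to_cpu(ehdr->addr);	/* per-peer variation */
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
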
844 struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; in tipc_aead_encrypt_done()
845 struct tipc_bearer *b = tx_ctx->bearer; in tipc_aead_encrypt_done()
846 struct tipc_aead *aead = tx_ctx->aead; in tipc_aead_encrypt_done()
847 struct tipc_crypto *tx = aead->crypto; in tipc_aead_encrypt_done() local
848 struct net *net = tx->net; in tipc_aead_encrypt_done()
852 this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]); in tipc_aead_encrypt_done()
854 if (likely(test_bit(0, &b->up))) in tipc_aead_encrypt_done()
855 b->media->send_msg(net, skb, b, &tx_ctx->dst); in tipc_aead_encrypt_done()
860 case -EINPROGRESS: in tipc_aead_encrypt_done()
863 this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]); in tipc_aead_encrypt_done()
875 * tipc_aead_decrypt - Decrypt an encrypted message
883 * * -EINPROGRESS/-EBUSY : if a callback will be performed
901 return -ENOKEY; in tipc_aead_decrypt()
905 pr_err("RX: skb_cow_data() returned %d\n", nsg); in tipc_aead_decrypt()
913 return -ENOMEM; in tipc_aead_decrypt()
914 TIPC_SKB_CB(skb)->crypto_ctx = ctx; in tipc_aead_decrypt()
918 rc = skb_to_sgvec(skb, sg, 0, skb->len); in tipc_aead_decrypt()
920 pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg); in tipc_aead_decrypt()
925 ehdr = (struct tipc_ehdr *)skb->data; in tipc_aead_decrypt()
926 salt = aead->salt; in tipc_aead_decrypt()
927 if (aead->mode == CLUSTER_KEY) in tipc_aead_decrypt()
928 salt ^= __be32_to_cpu(ehdr->addr); in tipc_aead_decrypt()
929 else if (ehdr->destined) in tipc_aead_decrypt()
932 memcpy(iv + 4, (u8 *)&ehdr->seqno, 8); in tipc_aead_decrypt()
938 aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv); in tipc_aead_decrypt()
944 rx_ctx->aead = aead; in tipc_aead_decrypt()
945 rx_ctx->bearer = b; in tipc_aead_decrypt()
949 rc = -ENODEV; in tipc_aead_decrypt()
955 if (rc == -EINPROGRESS || rc == -EBUSY) in tipc_aead_decrypt()
962 TIPC_SKB_CB(skb)->crypto_ctx = NULL; in tipc_aead_decrypt()
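
Decryption rebuilds the same nonce; line 929's "destined" branch is elided by the match, but by symmetry with the TX side it presumably XORs in the receiver's own address. A hedged guess at that branch only:

	else if (ehdr->destined)
		salt ^= tipc_own_addr(net);	/* assumption: mirrors the TX-side XOR */
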
969 struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; in tipc_aead_decrypt_done()
970 struct tipc_bearer *b = rx_ctx->bearer; in tipc_aead_decrypt_done()
971 struct tipc_aead *aead = rx_ctx->aead; in tipc_aead_decrypt_done()
972 struct tipc_crypto_stats __percpu *stats = aead->crypto->stats; in tipc_aead_decrypt_done()
973 struct net *net = aead->crypto->net; in tipc_aead_decrypt_done()
977 this_cpu_inc(stats->stat[STAT_ASYNC_OK]); in tipc_aead_decrypt_done()
979 case -EINPROGRESS: in tipc_aead_decrypt_done()
982 this_cpu_inc(stats->stat[STAT_ASYNC_NOK]); in tipc_aead_decrypt_done()
989 if (likely(test_bit(0, &b->up))) in tipc_aead_decrypt_done()
1000 return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE; in tipc_ehdr_size()
1004 * tipc_ehdr_validate - Validate an encryption message
1017 ehdr = (struct tipc_ehdr *)skb->data; in tipc_ehdr_validate()
1018 if (unlikely(ehdr->version != TIPC_EVERSION)) in tipc_ehdr_validate()
1023 if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE)) in tipc_ehdr_validate()
1030 * tipc_ehdr_build - Build TIPC encryption message header
1032 * @aead: TX AEAD key to be used for the message encryption
1035 * @__rx: RX crypto handle if dest is "known"
1056 * cluster key mode, otherwise it's better for a per-peer seqno! in tipc_ehdr_build()
1058 if (!__rx || aead->mode == CLUSTER_KEY) in tipc_ehdr_build()
1059 seqno = atomic64_inc_return(&aead->seqno); in tipc_ehdr_build()
1061 seqno = atomic64_inc_return(&__rx->sndnxt); in tipc_ehdr_build()
1067 /* Word 1-2 */ in tipc_ehdr_build()
1068 ehdr->seqno = cpu_to_be64(seqno); in tipc_ehdr_build()
1070 /* Words 0, 3- */ in tipc_ehdr_build()
1071 ehdr->version = TIPC_EVERSION; in tipc_ehdr_build()
1072 ehdr->user = 0; in tipc_ehdr_build()
1073 ehdr->keepalive = 0; in tipc_ehdr_build()
1074 ehdr->tx_key = tx_key; in tipc_ehdr_build()
1075 ehdr->destined = (__rx) ? 1 : 0; in tipc_ehdr_build()
1076 ehdr->rx_key_active = (__rx) ? __rx->key.active : 0; in tipc_ehdr_build()
1077 ehdr->rx_nokey = (__rx) ? __rx->nokey : 0; in tipc_ehdr_build()
1078 ehdr->master_key = aead->crypto->key_master; in tipc_ehdr_build()
1079 ehdr->reserved_1 = 0; in tipc_ehdr_build()
1080 ehdr->reserved_2 = 0; in tipc_ehdr_build()
1084 ehdr->user = LINK_CONFIG; in tipc_ehdr_build()
1085 memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN); in tipc_ehdr_build()
1089 ehdr->user = LINK_PROTOCOL; in tipc_ehdr_build()
1090 ehdr->keepalive = msg_is_keepalive(hdr); in tipc_ehdr_build()
1092 ehdr->addr = hdr->hdr[3]; in tipc_ehdr_build()
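
Isolating the seqno rule from lines 1058-1061: cluster keys (or an unknown destination) draw from the key's own counter, while per-node keys use the per-peer sndnxt so every receiver observes its own monotonic stream. Restated as a hypothetical helper:

	static u64 tipc_ehdr_next_seqno(struct tipc_aead *aead,
					struct tipc_crypto *__rx)
	{
		if (!__rx || aead->mode == CLUSTER_KEY)
			return atomic64_inc_return(&aead->seqno);
		return atomic64_inc_return(&__rx->sndnxt);
	}
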
1104 struct tipc_key old = c->key; in tipc_crypto_key_set_state()
1107 c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) | in tipc_crypto_key_set_state()
1111 pr_debug("%s: key changing %s ::%pS\n", c->name, in tipc_crypto_key_set_state()
1112 tipc_key_change_dump(old, c->key, buf), in tipc_crypto_key_set_state()
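
Only the passive term of the assignment at line 1107 survives the match; the full statement presumably ORs all three 2-bit slots into the packed byte:

	c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
		      ((new_active  & KEY_MASK) << (KEY_BITS))	    |
		      ((new_pending & KEY_MASK));
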
1117 * tipc_crypto_key_init - Initiate a new user / AEAD key
1141 tipc_aead_free(&aead->rcu); in tipc_crypto_key_init()
1148 * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
1154 * Return: new key id in case of success, otherwise: -EBUSY
1161 int rc = -EBUSY; in tipc_crypto_key_attach()
1164 spin_lock_bh(&c->lock); in tipc_crypto_key_attach()
1165 key = c->key; in tipc_crypto_key_attach()
1173 if (tipc_aead_users(c->aead[key.pending]) > 0) in tipc_crypto_key_attach()
1195 aead->crypto = c; in tipc_crypto_key_attach()
1196 aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen; in tipc_crypto_key_attach()
1197 tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock); in tipc_crypto_key_attach()
1198 if (likely(c->key.keys != key.keys)) in tipc_crypto_key_attach()
1201 c->working = 1; in tipc_crypto_key_attach()
1202 c->nokey = 0; in tipc_crypto_key_attach()
1203 c->key_master |= master_key; in tipc_crypto_key_attach()
1207 spin_unlock_bh(&c->lock); in tipc_crypto_key_attach()
1213 struct tipc_crypto *tx, *rx; in tipc_crypto_key_flush() local
1216 spin_lock_bh(&c->lock); in tipc_crypto_key_flush()
1219 rx = c; in tipc_crypto_key_flush()
1220 tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_key_flush()
1221 if (cancel_delayed_work(&rx->work)) { in tipc_crypto_key_flush()
1222 kfree(rx->skey); in tipc_crypto_key_flush()
1223 rx->skey = NULL; in tipc_crypto_key_flush()
1224 atomic_xchg(&rx->key_distr, 0); in tipc_crypto_key_flush()
1225 tipc_node_put(rx->node); in tipc_crypto_key_flush()
1227 /* RX stopping => decrease TX key users if any */ in tipc_crypto_key_flush()
1228 k = atomic_xchg(&rx->peer_rx_active, 0); in tipc_crypto_key_flush()
1230 tipc_aead_users_dec(tx->aead[k], 0); in tipc_crypto_key_flush()
1231 /* Mark the point TX key users changed */ in tipc_crypto_key_flush()
1232 tx->timer1 = jiffies; in tipc_crypto_key_flush()
1236 c->flags = 0; in tipc_crypto_key_flush()
1239 tipc_crypto_key_detach(c->aead[k], &c->lock); in tipc_crypto_key_flush()
1240 atomic64_set(&c->sndnxt, 0); in tipc_crypto_key_flush()
1241 spin_unlock_bh(&c->lock); in tipc_crypto_key_flush()
1245 * tipc_crypto_key_try_align - Align RX keys if possible
1246 * @rx: RX crypto handle
1247 * @new_pending: new pending slot if aligned (= TX key from peer)
1257 static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending) in tipc_crypto_key_try_align() argument
1265 spin_lock(&rx->lock); in tipc_crypto_key_try_align()
1266 key = rx->key; in tipc_crypto_key_try_align()
1275 if (tipc_aead_users(rx->aead[key.pending]) > 0) in tipc_crypto_key_try_align()
1279 tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock); in tipc_crypto_key_try_align()
1280 if (!refcount_dec_if_one(&tmp1->refcnt)) in tipc_crypto_key_try_align()
1282 rcu_assign_pointer(rx->aead[key.pending], NULL); in tipc_crypto_key_try_align()
1286 tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock)); in tipc_crypto_key_try_align()
1287 x = (key.passive - key.pending + new_pending) % KEY_MAX; in tipc_crypto_key_try_align()
1291 /* Re-allocate the key(s) */ in tipc_crypto_key_try_align()
1292 tipc_crypto_key_set_state(rx, new_passive, 0, new_pending); in tipc_crypto_key_try_align()
1293 rcu_assign_pointer(rx->aead[new_pending], tmp1); in tipc_crypto_key_try_align()
1295 rcu_assign_pointer(rx->aead[new_passive], tmp2); in tipc_crypto_key_try_align()
1296 refcount_set(&tmp1->refcnt, 1); in tipc_crypto_key_try_align()
1298 pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending, in tipc_crypto_key_try_align()
1302 spin_unlock(&rx->lock); in tipc_crypto_key_try_align()
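
A worked example of line 1287, assuming KEY_MAX == 3 non-master slots: with key.pending = 1, key.passive = 2 and a peer TX key (new_pending) of 3, x = (2 - 1 + 3) % 3 = 1, so the pending key is re-homed to slot 3 to match the peer and the passive key follows it into slot 1, preserving their relative distance.
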
1307 * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
1308 * @tx: TX crypto handle
1309 * @rx: RX crypto handle (can be NULL)
1311 * @tx_key: peer TX key id
1313 * This function looks up the existing TX keys and picks one which is suitable in tipc_crypto_key_pick_tx()
1317 * Return: the TX AEAD key handle in case of success, otherwise NULL
1319 static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx, in tipc_crypto_key_pick_tx() argument
1320 struct tipc_crypto *rx, in tipc_crypto_key_pick_tx() argument
1326 struct tipc_key key = tx->key; in tipc_crypto_key_pick_tx()
1330 if (!skb_cb->tx_clone_deferred) { in tipc_crypto_key_pick_tx()
1331 skb_cb->tx_clone_deferred = 1; in tipc_crypto_key_pick_tx()
1332 memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); in tipc_crypto_key_pick_tx()
1335 skb_cb->tx_clone_ctx.rx = rx; in tipc_crypto_key_pick_tx()
1336 if (++skb_cb->tx_clone_ctx.recurs > 2) in tipc_crypto_key_pick_tx()
1339 /* Pick one TX key */ in tipc_crypto_key_pick_tx()
1340 spin_lock(&tx->lock); in tipc_crypto_key_pick_tx()
1342 aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock); in tipc_crypto_key_pick_tx()
1350 aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock); in tipc_crypto_key_pick_tx()
1353 if (aead->mode != CLUSTER_KEY || in tipc_crypto_key_pick_tx()
1354 aead == skb_cb->tx_clone_ctx.last) { in tipc_crypto_key_pick_tx()
1359 skb_cb->tx_clone_ctx.last = aead; in tipc_crypto_key_pick_tx()
1360 WARN_ON(skb->next); in tipc_crypto_key_pick_tx()
1361 skb->next = skb_clone(skb, GFP_ATOMIC); in tipc_crypto_key_pick_tx()
1362 if (unlikely(!skb->next)) in tipc_crypto_key_pick_tx()
1369 WARN_ON(!refcount_inc_not_zero(&aead->refcnt)); in tipc_crypto_key_pick_tx()
1370 spin_unlock(&tx->lock); in tipc_crypto_key_pick_tx()
1377 * @rx: RX crypto handle
1380 * This function updates the peer node related data as the peer RX active key
1381 * has changed, so the number of TX keys' users on this node is increased and
1388 * The "per-peer" sndnxt is also reset when the peer key has switched.
1390 static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb) in tipc_crypto_key_synch() argument
1393 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_key_synch() local
1395 u32 self = tipc_own_addr(rx->net); in tipc_crypto_key_synch()
1399 /* Update RX 'key_master' flag according to peer, also mark "legacy" if in tipc_crypto_key_synch()
1402 rx->key_master = ehdr->master_key; in tipc_crypto_key_synch()
1403 if (!rx->key_master) in tipc_crypto_key_synch()
1404 tx->legacy_user = 1; in tipc_crypto_key_synch()
1407 if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self) in tipc_crypto_key_synch()
1411 if (ehdr->rx_nokey) { in tipc_crypto_key_synch()
1413 tx->timer2 = jiffies; in tipc_crypto_key_synch()
1415 if (tx->key.keys && in tipc_crypto_key_synch()
1416 !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) { in tipc_crypto_key_synch()
1420 if (queue_delayed_work(tx->wq, &rx->work, delay)) in tipc_crypto_key_synch()
1421 tipc_node_get(rx->node); in tipc_crypto_key_synch()
1425 atomic_xchg(&rx->key_distr, 0); in tipc_crypto_key_synch()
1428 /* Case 2: Peer RX active key has changed, let's update own TX users */ in tipc_crypto_key_synch()
1429 cur = atomic_read(&rx->peer_rx_active); in tipc_crypto_key_synch()
1430 new = ehdr->rx_key_active; in tipc_crypto_key_synch()
1431 if (tx->key.keys && in tipc_crypto_key_synch()
1433 atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) { in tipc_crypto_key_synch()
1435 tipc_aead_users_inc(tx->aead[new], INT_MAX); in tipc_crypto_key_synch()
1437 tipc_aead_users_dec(tx->aead[cur], 0); in tipc_crypto_key_synch()
1439 atomic64_set(&rx->sndnxt, 0); in tipc_crypto_key_synch()
1440 /* Mark the point TX key users changed */ in tipc_crypto_key_synch()
1441 tx->timer1 = jiffies; in tipc_crypto_key_synch()
1443 pr_debug("%s: key users changed %d-- %d++, peer %s\n", in tipc_crypto_key_synch()
1444 tx->name, cur, new, rx->name); in tipc_crypto_key_synch()
1450 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; in tipc_crypto_key_revoke() local
1453 spin_lock_bh(&tx->lock); in tipc_crypto_key_revoke()
1454 key = tx->key; in tipc_crypto_key_revoke()
1458 tipc_crypto_key_set_state(tx, key.passive, 0, key.pending); in tipc_crypto_key_revoke()
1459 tipc_crypto_key_detach(tx->aead[key.active], &tx->lock); in tipc_crypto_key_revoke()
1460 spin_unlock_bh(&tx->lock); in tipc_crypto_key_revoke()
1462 pr_warn("%s: key is revoked\n", tx->name); in tipc_crypto_key_revoke()
1463 return -EKEYREVOKED; in tipc_crypto_key_revoke()
1472 return -EEXIST; in tipc_crypto_start()
1477 return -ENOMEM; in tipc_crypto_start()
1479 /* Allocate workqueue on TX */ in tipc_crypto_start()
1481 c->wq = alloc_ordered_workqueue("tipc_crypto", 0); in tipc_crypto_start()
1482 if (!c->wq) { in tipc_crypto_start()
1484 return -ENOMEM; in tipc_crypto_start()
1489 c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC); in tipc_crypto_start()
1490 if (!c->stats) { in tipc_crypto_start()
1491 if (c->wq) in tipc_crypto_start()
1492 destroy_workqueue(c->wq); in tipc_crypto_start()
1494 return -ENOMEM; in tipc_crypto_start()
1497 c->flags = 0; in tipc_crypto_start()
1498 c->net = net; in tipc_crypto_start()
1499 c->node = node; in tipc_crypto_start()
1500 get_random_bytes(&c->key_gen, 2); in tipc_crypto_start()
1502 atomic_set(&c->key_distr, 0); in tipc_crypto_start()
1503 atomic_set(&c->peer_rx_active, 0); in tipc_crypto_start()
1504 atomic64_set(&c->sndnxt, 0); in tipc_crypto_start()
1505 c->timer1 = jiffies; in tipc_crypto_start()
1506 c->timer2 = jiffies; in tipc_crypto_start()
1507 c->rekeying_intv = TIPC_REKEYING_INTV_DEF; in tipc_crypto_start()
1508 spin_lock_init(&c->lock); in tipc_crypto_start()
1509 scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX", in tipc_crypto_start()
1510 (is_rx(c)) ? tipc_node_get_id_str(c->node) : in tipc_crypto_start()
1511 tipc_own_id_string(c->net)); in tipc_crypto_start()
1514 INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx); in tipc_crypto_start()
1516 INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx); in tipc_crypto_start()
1532 c->rekeying_intv = 0; in tipc_crypto_stop()
1533 cancel_delayed_work_sync(&c->work); in tipc_crypto_stop()
1534 destroy_workqueue(c->wq); in tipc_crypto_stop()
1540 tipc_aead_put(rcu_dereference(c->aead[k])); in tipc_crypto_stop()
1542 pr_debug("%s: has been stopped\n", c->name); in tipc_crypto_stop()
1545 free_percpu(c->stats); in tipc_crypto_stop()
1551 void tipc_crypto_timeout(struct tipc_crypto *rx) in tipc_crypto_timeout() argument
1553 struct tipc_net *tn = tipc_net(rx->net); in tipc_crypto_timeout()
1554 struct tipc_crypto *tx = tn->crypto_tx; in tipc_crypto_timeout() local
1558 /* TX pending: taking all users & stable -> active */ in tipc_crypto_timeout()
1559 spin_lock(&tx->lock); in tipc_crypto_timeout()
1560 key = tx->key; in tipc_crypto_timeout()
1561 if (key.active && tipc_aead_users(tx->aead[key.active]) > 0) in tipc_crypto_timeout()
1563 if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0) in tipc_crypto_timeout()
1565 if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME)) in tipc_crypto_timeout()
1568 tipc_crypto_key_set_state(tx, key.passive, key.pending, 0); in tipc_crypto_timeout()
1570 tipc_crypto_key_detach(tx->aead[key.active], &tx->lock); in tipc_crypto_timeout()
1571 this_cpu_inc(tx->stats->stat[STAT_SWITCHES]); in tipc_crypto_timeout()
1572 pr_info("%s: key[%d] is activated\n", tx->name, key.pending); in tipc_crypto_timeout()
1575 spin_unlock(&tx->lock); in tipc_crypto_timeout()
1577 /* RX pending: having user -> active */ in tipc_crypto_timeout()
1578 spin_lock(&rx->lock); in tipc_crypto_timeout()
1579 key = rx->key; in tipc_crypto_timeout()
1580 if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0) in tipc_crypto_timeout()
1586 rx->timer2 = jiffies; in tipc_crypto_timeout()
1587 tipc_crypto_key_set_state(rx, key.passive, key.active, 0); in tipc_crypto_timeout()
1588 this_cpu_inc(rx->stats->stat[STAT_SWITCHES]); in tipc_crypto_timeout()
1589 pr_info("%s: key[%d] is activated\n", rx->name, key.pending); in tipc_crypto_timeout()
1593 /* RX pending: not working -> remove */ in tipc_crypto_timeout()
1594 if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10) in tipc_crypto_timeout()
1597 tipc_crypto_key_set_state(rx, key.passive, key.active, 0); in tipc_crypto_timeout()
1598 tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock); in tipc_crypto_timeout()
1599 pr_debug("%s: key[%d] is removed\n", rx->name, key.pending); in tipc_crypto_timeout()
1603 /* RX active: timed out or no user -> pending */ in tipc_crypto_timeout()
1606 if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) && in tipc_crypto_timeout()
1607 tipc_aead_users(rx->aead[key.active]) > 0) in tipc_crypto_timeout()
1614 rx->timer2 = jiffies; in tipc_crypto_timeout()
1615 tipc_crypto_key_set_state(rx, key.passive, 0, key.pending); in tipc_crypto_timeout()
1616 tipc_aead_users_set(rx->aead[key.pending], 0); in tipc_crypto_timeout()
1617 pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active); in tipc_crypto_timeout()
1621 /* RX passive: outdated or not working -> free */ in tipc_crypto_timeout()
1624 if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) && in tipc_crypto_timeout()
1625 tipc_aead_users(rx->aead[key.passive]) > -10) in tipc_crypto_timeout()
1628 tipc_crypto_key_set_state(rx, 0, key.active, key.pending); in tipc_crypto_timeout()
1629 tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock); in tipc_crypto_timeout()
1630 pr_debug("%s: key[%d] is freed\n", rx->name, key.passive); in tipc_crypto_timeout()
1633 spin_unlock(&rx->lock); in tipc_crypto_timeout()
1638 if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) in tipc_crypto_timeout()
1639 tx->legacy_user = 0; in tipc_crypto_timeout()
1647 tipc_crypto_do_cmd(rx->net, cmd); in tipc_crypto_timeout()
1659 TIPC_SKB_CB(skb)->xmit_type = type; in tipc_crypto_clone_msg()
1662 b->media->send_msg(net, skb, b, dst); in tipc_crypto_clone_msg()
1667 * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
1683 * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
1684 * -ENOKEY : the encryption has failed due to no key
1685 * * -EKEYREVOKED : the encryption has failed due to key revoked
1686 * * -ENOMEM : the encryption has failed due to no memory
1694 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; in tipc_crypto_xmit() local
1695 struct tipc_crypto_stats __percpu *stats = tx->stats; in tipc_crypto_xmit()
1697 struct tipc_key key = tx->key; in tipc_crypto_xmit()
1701 int rc = -ENOKEY; in tipc_crypto_xmit()
1705 if (!tx->working) in tipc_crypto_xmit()
1711 if (!tx->key_master && !key.active) in tipc_crypto_xmit()
1713 if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key) in tipc_crypto_xmit()
1715 if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) { in tipc_crypto_xmit()
1716 pr_debug("%s: probing for key[%d]\n", tx->name, in tipc_crypto_xmit()
1726 if (tx->key_master) { in tipc_crypto_xmit()
1730 if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) { in tipc_crypto_xmit()
1731 pr_debug("%s: gracing for msg (%d %d)\n", tx->name, in tipc_crypto_xmit()
1738 time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) { in tipc_crypto_xmit()
1739 if (__rx && __rx->key_master && in tipc_crypto_xmit()
1740 !atomic_read(&__rx->peer_rx_active)) in tipc_crypto_xmit()
1743 if (likely(!tx->legacy_user)) in tipc_crypto_xmit()
1760 aead = tipc_aead_get(tx->aead[tx_key]); in tipc_crypto_xmit()
1770 this_cpu_inc(stats->stat[STAT_OK]); in tipc_crypto_xmit()
1772 case -EINPROGRESS: in tipc_crypto_xmit()
1773 case -EBUSY: in tipc_crypto_xmit()
1774 this_cpu_inc(stats->stat[STAT_ASYNC]); in tipc_crypto_xmit()
1778 this_cpu_inc(stats->stat[STAT_NOK]); in tipc_crypto_xmit()
1779 if (rc == -ENOKEY) in tipc_crypto_xmit()
1780 this_cpu_inc(stats->stat[STAT_NOKEYS]); in tipc_crypto_xmit()
1781 else if (rc == -EKEYREVOKED) in tipc_crypto_xmit()
1782 this_cpu_inc(stats->stat[STAT_BADKEYS]); in tipc_crypto_xmit()
1793 * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
1795 * @rx: RX crypto handle
1803 * Note: RX key(s) can be re-aligned, or, in case no key is suitable, TX
1804 * cluster key(s) can be taken for decryption (recursively).
1808 * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
1809 * * -ENOKEY : the decryption has failed due to no key
1810 * * -EBADMSG : the decryption has failed due to bad message
1811 * * -ENOMEM : the decryption has failed due to no memory
1814 int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx, in tipc_crypto_rcv() argument
1817 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; in tipc_crypto_rcv() local
1821 int rc = -ENOKEY; in tipc_crypto_rcv()
1824 tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key; in tipc_crypto_rcv()
1827 * Let's try with TX key (i.e. cluster mode) & verify the skb first! in tipc_crypto_rcv()
1829 if (unlikely(!rx || tx_key == KEY_MASTER)) in tipc_crypto_rcv()
1832 /* Pick RX key according to TX key if any */ in tipc_crypto_rcv()
1833 key = rx->key; in tipc_crypto_rcv()
1838 /* Unknown key, let's try to align RX key(s) */ in tipc_crypto_rcv()
1839 if (tipc_crypto_key_try_align(rx, tx_key)) in tipc_crypto_rcv()
1843 /* No key suitable? Try to pick one from TX... */ in tipc_crypto_rcv()
1844 aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key); in tipc_crypto_rcv()
1852 aead = tipc_aead_get(rx->aead[tx_key]); in tipc_crypto_rcv()
1857 stats = ((rx) ?: tx)->stats; in tipc_crypto_rcv()
1860 this_cpu_inc(stats->stat[STAT_OK]); in tipc_crypto_rcv()
1862 case -EINPROGRESS: in tipc_crypto_rcv()
1863 case -EBUSY: in tipc_crypto_rcv()
1864 this_cpu_inc(stats->stat[STAT_ASYNC]); in tipc_crypto_rcv()
1868 this_cpu_inc(stats->stat[STAT_NOK]); in tipc_crypto_rcv()
1869 if (rc == -ENOKEY) { in tipc_crypto_rcv()
1872 if (rx) { in tipc_crypto_rcv()
1873 /* Mark rx->nokey only if we don't have a in tipc_crypto_rcv()
1878 rx->nokey = !(rx->skey || in tipc_crypto_rcv()
1879 rcu_access_pointer(rx->aead[n])); in tipc_crypto_rcv()
1881 rx->name, rx->nokey, in tipc_crypto_rcv()
1882 tx_key, rx->key.keys); in tipc_crypto_rcv()
1883 tipc_node_put(rx->node); in tipc_crypto_rcv()
1885 this_cpu_inc(stats->stat[STAT_NOKEYS]); in tipc_crypto_rcv()
1887 } else if (rc == -EBADMSG) { in tipc_crypto_rcv()
1888 this_cpu_inc(stats->stat[STAT_BADMSGS]); in tipc_crypto_rcv()
1902 struct tipc_crypto *rx = aead->crypto; in tipc_crypto_rcv_complete() local
1907 /* Is this completed by TX? */ in tipc_crypto_rcv_complete()
1908 if (unlikely(is_tx(aead->crypto))) { in tipc_crypto_rcv_complete()
1909 rx = skb_cb->tx_clone_ctx.rx; in tipc_crypto_rcv_complete()
1910 pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n", in tipc_crypto_rcv_complete()
1911 (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead, in tipc_crypto_rcv_complete()
1912 (*skb)->next, skb_cb->flags); in tipc_crypto_rcv_complete()
1913 pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n", in tipc_crypto_rcv_complete()
1914 skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last, in tipc_crypto_rcv_complete()
1915 aead->crypto->aead[1], aead->crypto->aead[2], in tipc_crypto_rcv_complete()
1916 aead->crypto->aead[3]); in tipc_crypto_rcv_complete()
1918 if (err == -EBADMSG && (*skb)->next) in tipc_crypto_rcv_complete()
1919 tipc_rcv(net, (*skb)->next, b); in tipc_crypto_rcv_complete()
1923 if (likely((*skb)->next)) { in tipc_crypto_rcv_complete()
1924 kfree_skb((*skb)->next); in tipc_crypto_rcv_complete()
1925 (*skb)->next = NULL; in tipc_crypto_rcv_complete()
1927 ehdr = (struct tipc_ehdr *)(*skb)->data; in tipc_crypto_rcv_complete()
1928 if (!rx) { in tipc_crypto_rcv_complete()
1929 WARN_ON(ehdr->user != LINK_CONFIG); in tipc_crypto_rcv_complete()
1930 n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0, in tipc_crypto_rcv_complete()
1932 rx = tipc_node_crypto_rx(n); in tipc_crypto_rcv_complete()
1933 if (unlikely(!rx)) in tipc_crypto_rcv_complete()
1937 /* Ignore cloning if it was TX master key */ in tipc_crypto_rcv_complete()
1938 if (ehdr->tx_key == KEY_MASTER) in tipc_crypto_rcv_complete()
1942 WARN_ON(!refcount_inc_not_zero(&tmp->refcnt)); in tipc_crypto_rcv_complete()
1943 if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) { in tipc_crypto_rcv_complete()
1944 tipc_aead_free(&tmp->rcu); in tipc_crypto_rcv_complete()
1956 /* Set the RX key's user */ in tipc_crypto_rcv_complete()
1959 /* Mark this point, RX works */ in tipc_crypto_rcv_complete()
1960 rx->timer1 = jiffies; in tipc_crypto_rcv_complete()
1964 ehdr = (struct tipc_ehdr *)(*skb)->data; in tipc_crypto_rcv_complete()
1966 /* Mark this point, RX passive still works */ in tipc_crypto_rcv_complete()
1967 if (rx->key.passive && ehdr->tx_key == rx->key.passive) in tipc_crypto_rcv_complete()
1968 rx->timer2 = jiffies; in tipc_crypto_rcv_complete()
1972 if (pskb_trim(*skb, (*skb)->len - aead->authsize)) in tipc_crypto_rcv_complete()
1982 tipc_crypto_key_synch(rx, *skb); in tipc_crypto_rcv_complete()
1984 /* Re-fetch skb cb as skb might be changed in tipc_msg_validate */ in tipc_crypto_rcv_complete()
1988 skb_cb->decrypted = 1; in tipc_crypto_rcv_complete()
1991 if (likely(!skb_cb->tx_clone_deferred)) in tipc_crypto_rcv_complete()
1993 skb_cb->tx_clone_deferred = 0; in tipc_crypto_rcv_complete()
1994 memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); in tipc_crypto_rcv_complete()
2003 if (rx) in tipc_crypto_rcv_complete()
2004 tipc_node_put(rx->node); in tipc_crypto_rcv_complete()
2010 struct tipc_crypto *tx = tn->crypto_tx, *rx; in tipc_crypto_do_cmd() local
2030 pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net), in tipc_crypto_do_cmd()
2031 tipc_crypto_key_dump(tx, buf)); in tipc_crypto_do_cmd()
2034 for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { in tipc_crypto_do_cmd()
2035 rx = tipc_node_crypto_rx_by_list(p); in tipc_crypto_do_cmd()
2036 pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node), in tipc_crypto_do_cmd()
2037 tipc_crypto_key_dump(rx, buf)); in tipc_crypto_do_cmd()
2043 j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]); in tipc_crypto_do_cmd()
2046 memset(buf, '-', 115); in tipc_crypto_do_cmd()
2050 j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net)); in tipc_crypto_do_cmd()
2053 stat = per_cpu_ptr(tx->stats, cpu)->stat[i]; in tipc_crypto_do_cmd()
2054 j += scnprintf(buf + j, 200 - j, "|%11d ", stat); in tipc_crypto_do_cmd()
2061 for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { in tipc_crypto_do_cmd()
2062 rx = tipc_node_crypto_rx_by_list(p); in tipc_crypto_do_cmd()
2063 j = scnprintf(buf, 200, "RX(%7.7s) ", in tipc_crypto_do_cmd()
2064 tipc_node_get_id_str(rx->node)); in tipc_crypto_do_cmd()
2067 stat = per_cpu_ptr(rx->stats, cpu)->stat[i]; in tipc_crypto_do_cmd()
2068 j += scnprintf(buf + j, 200 - j, "|%11d ", in tipc_crypto_do_cmd()
2082 struct tipc_key key = c->key; in tipc_crypto_key_dump()
2092 c->timer2 + TIPC_TX_GRACE_PERIOD)) in tipc_crypto_key_dump()
2104 s = "-"; in tipc_crypto_key_dump()
2106 i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s); in tipc_crypto_key_dump()
2109 aead = rcu_dereference(c->aead[k]); in tipc_crypto_key_dump()
2111 i += scnprintf(buf + i, 200 - i, in tipc_crypto_key_dump()
2113 aead->hint, in tipc_crypto_key_dump()
2114 (aead->mode == CLUSTER_KEY) ? "c" : "p", in tipc_crypto_key_dump()
2115 atomic_read(&aead->users), in tipc_crypto_key_dump()
2116 refcount_read(&aead->refcnt)); in tipc_crypto_key_dump()
2118 i += scnprintf(buf + i, 200 - i, "\n"); in tipc_crypto_key_dump()
2122 i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n", in tipc_crypto_key_dump()
2123 atomic_read(&c->peer_rx_active)); in tipc_crypto_key_dump()
2135 /* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */ in tipc_key_change_dump()
2137 i += scnprintf(buf + i, 32 - i, "["); in tipc_key_change_dump()
2139 if (k == key->passive) in tipc_key_change_dump()
2141 else if (k == key->active) in tipc_key_change_dump()
2143 else if (k == key->pending) in tipc_key_change_dump()
2146 s = "-"; in tipc_key_change_dump()
2147 i += scnprintf(buf + i, 32 - i, in tipc_key_change_dump()
2151 i += scnprintf(buf + i, 32 - i, "] -> "); in tipc_key_change_dump()
2155 i += scnprintf(buf + i, 32 - i, "]"); in tipc_key_change_dump()
2160 * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point
2166 struct tipc_crypto *rx; in tipc_crypto_msg_rcv() local
2173 rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr)); in tipc_crypto_msg_rcv()
2174 if (unlikely(!rx)) in tipc_crypto_msg_rcv()
2179 if (tipc_crypto_key_rcv(rx, hdr)) in tipc_crypto_msg_rcv()
2186 tipc_node_put(rx->node); in tipc_crypto_msg_rcv()
2193 * tipc_crypto_key_distr - Distribute a TX key
2194 * @tx: the TX crypto
2200 int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key, in tipc_crypto_key_distr() argument
2205 int rc = -ENOKEY; in tipc_crypto_key_distr()
2212 aead = tipc_aead_get(tx->aead[key]); in tipc_crypto_key_distr()
2214 rc = tipc_crypto_key_xmit(tx->net, aead->key, in tipc_crypto_key_distr()
2215 aead->gen, aead->mode, in tipc_crypto_key_distr()
2226 * tipc_crypto_key_xmit - Send a session key
2234 * as its data section, then transmitted through the uc/bc link.
2251 return -ENOMEM; in tipc_crypto_key_xmit()
2261 *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen); in tipc_crypto_key_xmit()
2262 memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME); in tipc_crypto_key_xmit()
2263 memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key, in tipc_crypto_key_xmit()
2264 skey->keylen); in tipc_crypto_key_xmit()
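
The three stores above fix the MSG_CRYPTO data layout: algorithm name first, then the key length as a __be32, then the raw key bytes; the receive path at lines 2326-2329 parses the same layout back. Sketched (TIPC_AEAD_ALG_NAME is the fixed name-field width, 32 bytes in the uapi header, treated here as an assumption):

	/*
	 * data: [ alg_name: TIPC_AEAD_ALG_NAME ][ keylen: __be32 ][ key: keylen ]
	 *       offset 0                         +32               +36
	 */
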
2277 * tipc_crypto_key_rcv - Receive a session key
2278 * @rx: the RX crypto
2282 * schedules an RX work to attach the key to the corresponding RX crypto.
2287 static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr) in tipc_crypto_key_rcv() argument
2289 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_key_rcv() local
2298 pr_debug("%s: message data size is too small\n", rx->name); in tipc_crypto_key_rcv()
2307 pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name); in tipc_crypto_key_rcv()
2311 spin_lock(&rx->lock); in tipc_crypto_key_rcv()
2312 if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) { in tipc_crypto_key_rcv()
2313 pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name, in tipc_crypto_key_rcv()
2314 rx->skey, key_gen, rx->key_gen); in tipc_crypto_key_rcv()
2321 pr_err("%s: unable to allocate memory for skey\n", rx->name); in tipc_crypto_key_rcv()
2326 skey->keylen = keylen; in tipc_crypto_key_rcv()
2327 memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME); in tipc_crypto_key_rcv()
2328 memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32), in tipc_crypto_key_rcv()
2329 skey->keylen); in tipc_crypto_key_rcv()
2331 rx->key_gen = key_gen; in tipc_crypto_key_rcv()
2332 rx->skey_mode = msg_key_mode(hdr); in tipc_crypto_key_rcv()
2333 rx->skey = skey; in tipc_crypto_key_rcv()
2334 rx->nokey = 0; in tipc_crypto_key_rcv()
2338 spin_unlock(&rx->lock); in tipc_crypto_key_rcv()
2342 if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0))) in tipc_crypto_key_rcv()
2349 * tipc_crypto_work_rx - Scheduled RX works handler
2350 * @work: the struct RX work
2352 * The function processes the previously scheduled works, i.e. distributing a TX key
2353 * or attaching a received session key on the RX crypto.
2358 struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work); in tipc_crypto_work_rx() local
2359 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_work_rx() local
2365 /* Case 1: Distribute TX key to peer if scheduled */ in tipc_crypto_work_rx()
2366 if (atomic_cmpxchg(&rx->key_distr, in tipc_crypto_work_rx()
2370 key = tx->key.pending ?: tx->key.active; in tipc_crypto_work_rx()
2371 rc = tipc_crypto_key_distr(tx, key, rx->node); in tipc_crypto_work_rx()
2374 tx->name, key, tipc_node_get_id_str(rx->node), in tipc_crypto_work_rx()
2380 atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0); in tipc_crypto_work_rx()
2384 if (rx->skey) { in tipc_crypto_work_rx()
2385 rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false); in tipc_crypto_work_rx()
2388 rx->name, rc); in tipc_crypto_work_rx()
2390 case -EBUSY: in tipc_crypto_work_rx()
2391 case -ENOMEM: in tipc_crypto_work_rx()
2397 kfree(rx->skey); in tipc_crypto_work_rx()
2398 rx->skey = NULL; in tipc_crypto_work_rx()
2403 if (resched && queue_delayed_work(tx->wq, &rx->work, delay)) in tipc_crypto_work_rx()
2406 tipc_node_put(rx->node); in tipc_crypto_work_rx()
2410 * tipc_crypto_rekeying_sched - (Re)schedule rekeying w/o new interval
2411 * @tx: TX crypto
2415 void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed, in tipc_crypto_rekeying_sched() argument
2425 tx->rekeying_intv = new_intv; in tipc_crypto_rekeying_sched()
2426 cancel_delayed_work_sync(&tx->work); in tipc_crypto_rekeying_sched()
2429 if (tx->rekeying_intv || now) { in tipc_crypto_rekeying_sched()
2430 delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000; in tipc_crypto_rekeying_sched()
2431 queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay)); in tipc_crypto_rekeying_sched()
2436 * tipc_crypto_work_tx - Scheduled TX works handler
2437 * @work: the struct TX work
2441 * TX crypto and finally distributing it to peers. It also re-schedules the
2447 struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work); in tipc_crypto_work_tx() local
2449 struct tipc_key key = tx->key; in tipc_crypto_work_tx()
2451 int rc = -ENOMEM; in tipc_crypto_work_tx()
2458 aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]); in tipc_crypto_work_tx()
2466 skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC); in tipc_crypto_work_tx()
2472 tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false); in tipc_crypto_work_tx()
2474 rc = tipc_crypto_key_distr(tx, rc, NULL); in tipc_crypto_work_tx()
2479 pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc); in tipc_crypto_work_tx()
2482 /* Re-schedule rekeying if any */ in tipc_crypto_work_tx()
2483 tipc_crypto_rekeying_sched(tx, false, 0); in tipc_crypto_work_tx()