// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
#include <net/netdev_lock.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

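/* workqueue used to defer freeing of SAs until an RCU grace period has
 * elapsed, see queue_rcu_work() in macsec_rxsa_put()/macsec_txsa_put()
 */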
static struct workqueue_struct *macsec_wq;

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

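/* true when both PNs lie in the same half of the 32-bit PN space, e.g.
 * pn_same_half(0x7fffffff, 0x80000000) is false; used by the XPN replay
 * and PN-recovery logic below
 */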
#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @dev_tracker: refcount tracker for @real_dev reference
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: the Generic Receive Offload cells
 * @offload: status of offloading on the MACsec device
 * @insert_tx_tag: when offloading, the device requires an additional
 *	tag to be inserted
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	netdevice_tracker dev_tracker;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
	bool insert_tx_tag;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

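/* take a reference on the RX SA if it exists and is active; the caller
 * must release it with macsec_rxsa_put()
 */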
static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa_work(struct work_struct *work)
{
	struct macsec_rx_sa *sa =
		container_of(to_rcu_work(work), struct macsec_rx_sa, destroy_work);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		queue_rcu_work(macsec_wq, &sa->destroy_work);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa_work(struct work_struct *work)
{
	struct macsec_tx_sa *sa =
		container_of(to_rcu_work(work), struct macsec_tx_sa, destroy_work);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		queue_rcu_work(macsec_wq, &sa->destroy_work);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))

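/* build an SCI from a MAC address and a port number: the 6-byte address
 * followed by the 2-byte (big-endian) port
 */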
static sci_t make_sci(const u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_active_sci(struct macsec_secy *secy)
{
	struct macsec_rx_sc *rx_sc = rcu_dereference_bh(secy->rx_sc);

	/* Case single RX SC */
	if (rx_sc && !rcu_dereference_bh(rx_sc->next))
		return (rx_sc->active) ? rx_sc->sci : 0;
	/* Case no RX SC or multiple */
	else
		return 0;
}

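/* deduce the SCI of a received frame: use the explicit SCI when present,
 * otherwise fall back to the single active RX SC (SC and ES both clear)
 * or to an SCI built from the source address and the End Station port
 */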
static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present,
			      struct macsec_rxh_data *rxd)
{
	struct macsec_dev *macsec;
	sci_t sci = 0;

	/* SC = 1 */
	if (sci_present) {
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	/* SC = 0; ES = 0 */
	} else if ((!(hdr->tci_an & (MACSEC_TCI_ES | MACSEC_TCI_SC))) &&
		   (list_is_singular(&rxd->secys))) {
		/* Only one SecY should exist in this scenario */
		macsec = list_first_or_null_rcu(&rxd->secys, struct macsec_dev,
						secys);
		if (macsec)
			return macsec_active_sci(&macsec->secy);
	} else {
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
	}

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if the underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

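/* return the current PN of the TX SA and advance it; a returned PN of 0
 * means the SA wrapped and can no longer be used
 */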
static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
		dev_sw_netstats_tx_add(dev, 1, len);
}

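/* AEAD completion callback for the TX path: update the counters and hand
 * the now protected skb to the real device
 */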
static void macsec_encrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

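/* allocate the AEAD request, the IV and the scatterlist as a single
 * allocation
 */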
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

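/* protect an outgoing frame: push the SecTAG, reserve room for the ICV
 * and run AES-GCM over the payload; may return ERR_PTR(-EINPROGRESS) when
 * the crypto operation completes asynchronously via macsec_encrypt_done()
 */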
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = macsec_send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	macsec_skb_cb(skb)->has_sci = sci_present;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

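/* post-decryption checks: replay protection against the replay window,
 * per-SC/SA statistics, and advancing the next expected PN
 */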
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_dropped);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		unsigned int msdu_len = macsec_msdu_len(skb);
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
		else
			rxsc_stats->stats.InOctetsValidated += msdu_len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			this_cpu_inc(rx_sa->stats->InPktsNotValid);
			DEV_STATS_INC(secy->netdev, rx_errors);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		/* "pn + 1 >" instead of "pn >=", to support PN overflow in XPN */
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	dev_sw_netstats_rx_add(dev, len);
}

static void macsec_decrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, macsec->secy.netdev);

	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

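/* decrypt/verify a received frame with AES-GCM; for XPN, the full 64-bit
 * PN is first recovered from the 32-bit PN in the SecTAG; may return
 * ERR_PTR(-EINPROGRESS) when the crypto operation completes asynchronously
 * via macsec_decrypt_done()
 */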
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

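/* handle frames received without a SecTAG: divert to the matching port
 * when hardware offload already stripped the tag, and count/deliver
 * untagged frames according to validateFrames in the software datapath
 */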
static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct metadata_dst *md_dst;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	bool is_macsec_md_dst;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);
	md_dst = skb_metadata_dst(skb);
	is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			const struct macsec_ops *ops;

			ops = macsec_get_ops(macsec, NULL);

			if (ops->rx_uses_md_dst && !is_macsec_md_dst)
				continue;

			if (is_macsec_md_dst) {
				struct macsec_rx_sc *rx_sc;

				/* All drivers that implement MACsec offload
				 * support using skb metadata destinations must
				 * indicate that they do so.
				 */
				DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
				rx_sc = find_rx_sc(&macsec->secy,
						   md_dst->u.macsec_info.sci);
				if (!rx_sc)
					continue;
				/* device indicated macsec offload occurred */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				eth_skb_pkt_type(skb, ndev);
				ret = RX_HANDLER_ANOTHER;
				goto out;
			}

			/* This datapath is insecure because it is unable to
			 * enforce isolation of broadcast/multicast traffic and
			 * unicast traffic with promiscuous mode on the macsec
			 * netdev. Since the core stack has no mechanism to
			 * check that the hardware did indeed receive MACsec
			 * traffic, it is possible that the response handling
			 * done by the MACsec port was to a plaintext packet.
			 * This violates the MACsec protocol standard.
			 */
			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				eth_skb_pkt_type(nskb, ndev);

				__netif_rx(nskb);
			} else if (ndev->flags & IFF_PROMISC) {
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			}

			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}

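/* rx_handler installed on the real device: validates the SecTAG, looks up
 * the receiving SC/SA, decrypts the frame and delivers it to the matching
 * MACsec netdevice
 */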
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci = 0;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci, rxd);
	if (!sci)
		goto drop_nosc;

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_errors);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(secy->netdev, rx_errors);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		DEV_STATS_INC(macsec->secy.netdev, rx_dropped);

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
drop_nosc:
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_errors);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = __netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

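/* allocate a GCM(AES) AEAD transform and program the key and ICV length */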
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);
	INIT_RCU_WORK(&rx_sa->destroy_work, free_rxsa_work);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

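/* allocate a new RX SC and link it at the head of the SecY's RX SC list;
 * fails with -EEXIST if the SCI is already in use on the real device
 */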
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
					 bool active)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc_obj(*rx_sc);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = active;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);
	INIT_RCU_WORK(&tx_sa->destroy_work, free_txsa_work);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u32)value);
}

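/* the get_*_from_nl() helpers below resolve netlink attributes to the
 * corresponding device/SecY/SC/SA objects; they must be called with the
 * RTNL held
 */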
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1),
	[MACSEC_SA_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN(NLA_UINT, 1),
	[MACSEC_SA_ATTR_KEYID] = NLA_POLICY_EXACT_LEN(MACSEC_KEYID_LEN),
	[MACSEC_SA_ATTR_KEY] = NLA_POLICY_MAX_LEN(MACSEC_MAX_KEY_LEN),
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = NLA_POLICY_EXACT_LEN(MACSEC_SALT_LEN),
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX),
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	ret = (*func)(ctx);

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	return true;
}

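/* netlink handler: create an RX SA on an existing RX SC and, if the
 * interface is offloaded, propagate the new SA to the device driver
 */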
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc_obj(*rx_sa);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	macsec_rxsa_put(rx_sa);
	rtnl_unlock();
	return err;
}

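/* netlink handler: create an RX SC with a given SCI on the MACsec device
 * and, if the interface is offloaded, propagate it to the device driver
 */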
macsec_add_rxsc(struct sk_buff * skb,struct genl_info * info)1873 static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
1874 {
1875 struct net_device *dev;
1876 sci_t sci = MACSEC_UNDEF_SCI;
1877 struct nlattr **attrs = info->attrs;
1878 struct macsec_rx_sc *rx_sc;
1879 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
1880 struct macsec_secy *secy;
1881 bool active = true;
1882 int ret;
1883
1884 if (!attrs[MACSEC_ATTR_IFINDEX])
1885 return -EINVAL;
1886
1887 if (parse_rxsc_config(attrs, tb_rxsc))
1888 return -EINVAL;
1889
1890 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
1891 return -EINVAL;
1892
1893 rtnl_lock();
1894 dev = get_dev_from_nl(genl_info_net(info), attrs);
1895 if (IS_ERR(dev)) {
1896 rtnl_unlock();
1897 return PTR_ERR(dev);
1898 }
1899
1900 secy = &macsec_priv(dev)->secy;
1901 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
1902
1903 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
1904 active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
1905
1906 rx_sc = create_rx_sc(dev, sci, active);
1907 if (IS_ERR(rx_sc)) {
1908 rtnl_unlock();
1909 return PTR_ERR(rx_sc);
1910 }
1911
1912 if (macsec_is_offloaded(netdev_priv(dev))) {
1913 const struct macsec_ops *ops;
1914 struct macsec_context ctx;
1915
1916 ops = macsec_get_ops(netdev_priv(dev), &ctx);
1917 if (!ops) {
1918 ret = -EOPNOTSUPP;
1919 goto cleanup;
1920 }
1921
1922 ctx.rx_sc = rx_sc;
1923 ctx.secy = secy;
1924
1925 ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
1926 if (ret)
1927 goto cleanup;
1928 }
1929
1930 rtnl_unlock();
1931
1932 return 0;
1933
1934 cleanup:
1935 del_rx_sc(secy, sci);
1936 free_rx_sc(rx_sc);
1937 rtnl_unlock();
1938 return ret;
1939 }
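
/* Illustrative userspace sketch: MACSEC_CMD_ADD_RXSC creates the
 * receive channel that RX SAs attach to, identified by the peer's
 * SCI. With iproute2 this is roughly (example values)
 *
 *   ip macsec add macsec0 rx port 11 address c6:19:52:8f:e6:a0
 */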
1940
1941 static bool validate_add_txsa(struct nlattr **attrs)
1942 {
1943 if (!attrs[MACSEC_SA_ATTR_AN] ||
1944 !attrs[MACSEC_SA_ATTR_PN] ||
1945 !attrs[MACSEC_SA_ATTR_KEY] ||
1946 !attrs[MACSEC_SA_ATTR_KEYID])
1947 return false;
1948
1949 return true;
1950 }
1951
1952 static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1953 {
1954 struct net_device *dev;
1955 struct nlattr **attrs = info->attrs;
1956 struct macsec_secy *secy;
1957 struct macsec_tx_sc *tx_sc;
1958 struct macsec_tx_sa *tx_sa;
1959 unsigned char assoc_num;
1960 int pn_len;
1961 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
1962 bool was_operational;
1963 int err;
1964
1965 if (!attrs[MACSEC_ATTR_IFINDEX])
1966 return -EINVAL;
1967
1968 if (parse_sa_config(attrs, tb_sa))
1969 return -EINVAL;
1970
1971 if (!validate_add_txsa(tb_sa))
1972 return -EINVAL;
1973
1974 rtnl_lock();
1975 dev = get_dev_from_nl(genl_info_net(info), attrs);
1976 if (IS_ERR(dev)) {
1977 rtnl_unlock();
1978 return PTR_ERR(dev);
1979 }
1980
1981 secy = &macsec_priv(dev)->secy;
1982 tx_sc = &secy->tx_sc;
1983
1984 assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
1985
1986 if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
1987 pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
1988 nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
1989 rtnl_unlock();
1990 return -EINVAL;
1991 }
1992
1993 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
1994 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
1995 pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
1996 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
1997 rtnl_unlock();
1998 return -EINVAL;
1999 }
2000
2001 if (secy->xpn) {
2002 if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
2003 rtnl_unlock();
2004 return -EINVAL;
2005 }
2006 }
2007
2008 tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
2009 if (tx_sa) {
2010 rtnl_unlock();
2011 return -EBUSY;
2012 }
2013
2014 tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
2015 if (!tx_sa) {
2016 rtnl_unlock();
2017 return -ENOMEM;
2018 }
2019
2020 err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2021 secy->key_len, secy->icv_len);
2022 if (err < 0) {
2023 kfree(tx_sa);
2024 rtnl_unlock();
2025 return err;
2026 }
2027
2028 spin_lock_bh(&tx_sa->lock);
2029 tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
2030 spin_unlock_bh(&tx_sa->lock);
2031
2032 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2033 tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2034
2035 was_operational = secy->operational;
2036 if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
2037 secy->operational = true;
2038
2039 if (secy->xpn) {
2040 tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
2041 nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
2042 MACSEC_SALT_LEN);
2043 }
2044
2045 /* If h/w offloading is available, propagate to the device */
2046 if (macsec_is_offloaded(netdev_priv(dev))) {
2047 const struct macsec_ops *ops;
2048 struct macsec_context ctx;
2049
2050 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2051 if (!ops) {
2052 err = -EOPNOTSUPP;
2053 goto cleanup;
2054 }
2055
2056 ctx.sa.assoc_num = assoc_num;
2057 ctx.sa.tx_sa = tx_sa;
2058 ctx.secy = secy;
2059 memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2060 secy->key_len);
2061
2062 err = macsec_offload(ops->mdo_add_txsa, &ctx);
2063 memzero_explicit(ctx.sa.key, secy->key_len);
2064 if (err)
2065 goto cleanup;
2066 }
2067
2068 nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
2069 rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2070
2071 rtnl_unlock();
2072
2073 return 0;
2074
2075 cleanup:
2076 secy->operational = was_operational;
2077 macsec_txsa_put(tx_sa);
2078 rtnl_unlock();
2079 return err;
2080 }
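
/* Illustrative userspace sketch: the transmit channel exists as part
 * of the SecY itself, so a single command maps to MACSEC_CMD_ADD_TXSA,
 * e.g. (example values)
 *
 *   ip macsec add macsec0 tx sa 0 pn 1 on \
 *           key 00 11111111111111111111111111111111
 */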
2081
2082 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2083 {
2084 struct nlattr **attrs = info->attrs;
2085 struct net_device *dev;
2086 struct macsec_secy *secy;
2087 struct macsec_rx_sc *rx_sc;
2088 struct macsec_rx_sa *rx_sa;
2089 u8 assoc_num;
2090 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2091 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2092 int ret;
2093
2094 if (!attrs[MACSEC_ATTR_IFINDEX])
2095 return -EINVAL;
2096
2097 if (parse_sa_config(attrs, tb_sa))
2098 return -EINVAL;
2099
2100 if (parse_rxsc_config(attrs, tb_rxsc))
2101 return -EINVAL;
2102
2103 rtnl_lock();
2104 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2105 &dev, &secy, &rx_sc, &assoc_num);
2106 if (IS_ERR(rx_sa)) {
2107 rtnl_unlock();
2108 return PTR_ERR(rx_sa);
2109 }
2110
2111 if (rx_sa->active) {
2112 rtnl_unlock();
2113 return -EBUSY;
2114 }
2115
2116 /* If h/w offloading is available, propagate to the device */
2117 if (macsec_is_offloaded(netdev_priv(dev))) {
2118 const struct macsec_ops *ops;
2119 struct macsec_context ctx;
2120
2121 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2122 if (!ops) {
2123 ret = -EOPNOTSUPP;
2124 goto cleanup;
2125 }
2126
2127 ctx.sa.assoc_num = assoc_num;
2128 ctx.sa.rx_sa = rx_sa;
2129 ctx.secy = secy;
2130
2131 ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2132 if (ret)
2133 goto cleanup;
2134 }
2135
2136 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2137 clear_rx_sa(rx_sa);
2138
2139 rtnl_unlock();
2140
2141 return 0;
2142
2143 cleanup:
2144 rtnl_unlock();
2145 return ret;
2146 }
2147
2148 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2149 {
2150 struct nlattr **attrs = info->attrs;
2151 struct net_device *dev;
2152 struct macsec_secy *secy;
2153 struct macsec_rx_sc *rx_sc;
2154 sci_t sci;
2155 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2156 int ret;
2157
2158 if (!attrs[MACSEC_ATTR_IFINDEX])
2159 return -EINVAL;
2160
2161 if (parse_rxsc_config(attrs, tb_rxsc))
2162 return -EINVAL;
2163
2164 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2165 return -EINVAL;
2166
2167 rtnl_lock();
2168 dev = get_dev_from_nl(genl_info_net(info), info->attrs);
2169 if (IS_ERR(dev)) {
2170 rtnl_unlock();
2171 return PTR_ERR(dev);
2172 }
2173
2174 secy = &macsec_priv(dev)->secy;
2175 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2176
2177 rx_sc = del_rx_sc(secy, sci);
2178 if (!rx_sc) {
2179 rtnl_unlock();
2180 return -ENODEV;
2181 }
2182
2183 /* If h/w offloading is available, propagate to the device */
2184 if (macsec_is_offloaded(netdev_priv(dev))) {
2185 const struct macsec_ops *ops;
2186 struct macsec_context ctx;
2187
2188 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2189 if (!ops) {
2190 ret = -EOPNOTSUPP;
2191 goto cleanup;
2192 }
2193
2194 ctx.rx_sc = rx_sc;
2195 ctx.secy = secy;
2196 ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2197 if (ret)
2198 goto cleanup;
2199 }
2200
2201 free_rx_sc(rx_sc);
2202 rtnl_unlock();
2203
2204 return 0;
2205
2206 cleanup:
2207 rtnl_unlock();
2208 return ret;
2209 }
2210
2211 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2212 {
2213 struct nlattr **attrs = info->attrs;
2214 struct net_device *dev;
2215 struct macsec_secy *secy;
2216 struct macsec_tx_sc *tx_sc;
2217 struct macsec_tx_sa *tx_sa;
2218 u8 assoc_num;
2219 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2220 int ret;
2221
2222 if (!attrs[MACSEC_ATTR_IFINDEX])
2223 return -EINVAL;
2224
2225 if (parse_sa_config(attrs, tb_sa))
2226 return -EINVAL;
2227
2228 rtnl_lock();
2229 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2230 &dev, &secy, &tx_sc, &assoc_num);
2231 if (IS_ERR(tx_sa)) {
2232 rtnl_unlock();
2233 return PTR_ERR(tx_sa);
2234 }
2235
2236 if (tx_sa->active) {
2237 rtnl_unlock();
2238 return -EBUSY;
2239 }
2240
2241 /* If h/w offloading is available, propagate to the device */
2242 if (macsec_is_offloaded(netdev_priv(dev))) {
2243 const struct macsec_ops *ops;
2244 struct macsec_context ctx;
2245
2246 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2247 if (!ops) {
2248 ret = -EOPNOTSUPP;
2249 goto cleanup;
2250 }
2251
2252 ctx.sa.assoc_num = assoc_num;
2253 ctx.sa.tx_sa = tx_sa;
2254 ctx.secy = secy;
2255
2256 ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2257 if (ret)
2258 goto cleanup;
2259 }
2260
2261 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2262 clear_tx_sa(tx_sa);
2263
2264 rtnl_unlock();
2265
2266 return 0;
2267
2268 cleanup:
2269 rtnl_unlock();
2270 return ret;
2271 }
2272
2273 static bool validate_upd_sa(struct nlattr **attrs)
2274 {
2275 if (!attrs[MACSEC_SA_ATTR_AN] ||
2276 attrs[MACSEC_SA_ATTR_KEY] ||
2277 attrs[MACSEC_SA_ATTR_KEYID] ||
2278 attrs[MACSEC_SA_ATTR_SSCI] ||
2279 attrs[MACSEC_SA_ATTR_SALT])
2280 return false;
2281
2282 return true;
2283 }
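
/* An update may toggle an SA's state or move its packet number, but
 * never touch the keying material: rekeying is done by installing a
 * fresh SA under another AN and switching over. An allowed update via
 * iproute2 would look like (example values)
 *
 *   ip macsec set macsec0 tx sa 0 pn 1024 off
 */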
2284
2285 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2286 {
2287 struct nlattr **attrs = info->attrs;
2288 struct net_device *dev;
2289 struct macsec_secy *secy;
2290 struct macsec_tx_sc *tx_sc;
2291 struct macsec_tx_sa *tx_sa;
2292 u8 assoc_num;
2293 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2294 bool was_operational, was_active;
2295 pn_t prev_pn;
2296 int ret = 0;
2297
2298 prev_pn.full64 = 0;
2299
2300 if (!attrs[MACSEC_ATTR_IFINDEX])
2301 return -EINVAL;
2302
2303 if (parse_sa_config(attrs, tb_sa))
2304 return -EINVAL;
2305
2306 if (!validate_upd_sa(tb_sa))
2307 return -EINVAL;
2308
2309 rtnl_lock();
2310 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2311 &dev, &secy, &tx_sc, &assoc_num);
2312 if (IS_ERR(tx_sa)) {
2313 rtnl_unlock();
2314 return PTR_ERR(tx_sa);
2315 }
2316
2317 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2318 int pn_len;
2319
2320 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2321 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2322 pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2323 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2324 rtnl_unlock();
2325 return -EINVAL;
2326 }
2327
2328 spin_lock_bh(&tx_sa->lock);
2329 prev_pn = tx_sa->next_pn_halves;
2330 tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
2331 spin_unlock_bh(&tx_sa->lock);
2332 }
2333
2334 was_active = tx_sa->active;
2335 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2336 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2337
2338 was_operational = secy->operational;
2339 if (assoc_num == tx_sc->encoding_sa)
2340 secy->operational = tx_sa->active;
2341
2342 /* If h/w offloading is available, propagate to the device */
2343 if (macsec_is_offloaded(netdev_priv(dev))) {
2344 const struct macsec_ops *ops;
2345 struct macsec_context ctx;
2346
2347 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2348 if (!ops) {
2349 ret = -EOPNOTSUPP;
2350 goto cleanup;
2351 }
2352
2353 ctx.sa.assoc_num = assoc_num;
2354 ctx.sa.tx_sa = tx_sa;
2355 ctx.sa.update_pn = !!prev_pn.full64;
2356 ctx.secy = secy;
2357
2358 ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2359 if (ret)
2360 goto cleanup;
2361 }
2362
2363 rtnl_unlock();
2364
2365 return 0;
2366
2367 cleanup:
2368 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2369 spin_lock_bh(&tx_sa->lock);
2370 tx_sa->next_pn_halves = prev_pn;
2371 spin_unlock_bh(&tx_sa->lock);
2372 }
2373 tx_sa->active = was_active;
2374 secy->operational = was_operational;
2375 rtnl_unlock();
2376 return ret;
2377 }
2378
2379 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2380 {
2381 struct nlattr **attrs = info->attrs;
2382 struct net_device *dev;
2383 struct macsec_secy *secy;
2384 struct macsec_rx_sc *rx_sc;
2385 struct macsec_rx_sa *rx_sa;
2386 u8 assoc_num;
2387 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2388 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2389 bool was_active;
2390 pn_t prev_pn;
2391 int ret = 0;
2392
2393 prev_pn.full64 = 0;
2394
2395 if (!attrs[MACSEC_ATTR_IFINDEX])
2396 return -EINVAL;
2397
2398 if (parse_rxsc_config(attrs, tb_rxsc))
2399 return -EINVAL;
2400
2401 if (parse_sa_config(attrs, tb_sa))
2402 return -EINVAL;
2403
2404 if (!validate_upd_sa(tb_sa))
2405 return -EINVAL;
2406
2407 rtnl_lock();
2408 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2409 &dev, &secy, &rx_sc, &assoc_num);
2410 if (IS_ERR(rx_sa)) {
2411 rtnl_unlock();
2412 return PTR_ERR(rx_sa);
2413 }
2414
2415 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2416 int pn_len;
2417
2418 pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2419 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2420 pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2421 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2422 rtnl_unlock();
2423 return -EINVAL;
2424 }
2425
2426 spin_lock_bh(&rx_sa->lock);
2427 prev_pn = rx_sa->next_pn_halves;
2428 rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
2429 spin_unlock_bh(&rx_sa->lock);
2430 }
2431
2432 was_active = rx_sa->active;
2433 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2434 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2435
2436 /* If h/w offloading is available, propagate to the device */
2437 if (macsec_is_offloaded(netdev_priv(dev))) {
2438 const struct macsec_ops *ops;
2439 struct macsec_context ctx;
2440
2441 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2442 if (!ops) {
2443 ret = -EOPNOTSUPP;
2444 goto cleanup;
2445 }
2446
2447 ctx.sa.assoc_num = assoc_num;
2448 ctx.sa.rx_sa = rx_sa;
2449 ctx.sa.update_pn = !!prev_pn.full64;
2450 ctx.secy = secy;
2451
2452 ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2453 if (ret)
2454 goto cleanup;
2455 }
2456
2457 rtnl_unlock();
2458 return 0;
2459
2460 cleanup:
2461 if (tb_sa[MACSEC_SA_ATTR_PN]) {
2462 spin_lock_bh(&rx_sa->lock);
2463 rx_sa->next_pn_halves = prev_pn;
2464 spin_unlock_bh(&rx_sa->lock);
2465 }
2466 rx_sa->active = was_active;
2467 rtnl_unlock();
2468 return ret;
2469 }
2470
2471 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2472 {
2473 struct nlattr **attrs = info->attrs;
2474 struct net_device *dev;
2475 struct macsec_secy *secy;
2476 struct macsec_rx_sc *rx_sc;
2477 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2478 unsigned int prev_n_rx_sc;
2479 bool was_active;
2480 int ret;
2481
2482 if (!attrs[MACSEC_ATTR_IFINDEX])
2483 return -EINVAL;
2484
2485 if (parse_rxsc_config(attrs, tb_rxsc))
2486 return -EINVAL;
2487
2488 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2489 return -EINVAL;
2490
2491 rtnl_lock();
2492 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2493 if (IS_ERR(rx_sc)) {
2494 rtnl_unlock();
2495 return PTR_ERR(rx_sc);
2496 }
2497
2498 was_active = rx_sc->active;
2499 prev_n_rx_sc = secy->n_rx_sc;
2500 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2501 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2502
2503 if (rx_sc->active != new)
2504 secy->n_rx_sc += new ? 1 : -1;
2505
2506 rx_sc->active = new;
2507 }
2508
2509 /* If h/w offloading is available, propagate to the device */
2510 if (macsec_is_offloaded(netdev_priv(dev))) {
2511 const struct macsec_ops *ops;
2512 struct macsec_context ctx;
2513
2514 ops = macsec_get_ops(netdev_priv(dev), &ctx);
2515 if (!ops) {
2516 ret = -EOPNOTSUPP;
2517 goto cleanup;
2518 }
2519
2520 ctx.rx_sc = rx_sc;
2521 ctx.secy = secy;
2522
2523 ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2524 if (ret)
2525 goto cleanup;
2526 }
2527
2528 rtnl_unlock();
2529
2530 return 0;
2531
2532 cleanup:
2533 secy->n_rx_sc = prev_n_rx_sc;
2534 rx_sc->active = was_active;
2535 rtnl_unlock();
2536 return ret;
2537 }
2538
2539 static bool macsec_is_configured(struct macsec_dev *macsec)
2540 {
2541 struct macsec_secy *secy = &macsec->secy;
2542 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2543 int i;
2544
2545 if (secy->rx_sc)
2546 return true;
2547
2548 for (i = 0; i < MACSEC_NUM_AN; i++)
2549 if (tx_sc->sa[i])
2550 return true;
2551
2552 return false;
2553 }
2554
2555 static bool macsec_needs_tx_tag(struct macsec_dev *macsec,
2556 const struct macsec_ops *ops)
2557 {
2558 return macsec->offload == MACSEC_OFFLOAD_PHY &&
2559 ops->mdo_insert_tx_tag;
2560 }
2561
2562 static void macsec_set_head_tail_room(struct net_device *dev)
2563 {
2564 struct macsec_dev *macsec = macsec_priv(dev);
2565 struct net_device *real_dev = macsec->real_dev;
2566 int needed_headroom, needed_tailroom;
2567 const struct macsec_ops *ops;
2568
2569 ops = macsec_get_ops(macsec, NULL);
2570 if (ops) {
2571 needed_headroom = ops->needed_headroom;
2572 needed_tailroom = ops->needed_tailroom;
2573 } else {
2574 needed_headroom = MACSEC_NEEDED_HEADROOM;
2575 needed_tailroom = MACSEC_NEEDED_TAILROOM;
2576 }
2577
2578 dev->needed_headroom = real_dev->needed_headroom + needed_headroom;
2579 dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom;
2580 }
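
/* Worked example: for the software data path, the defaults
 * (MACSEC_NEEDED_HEADROOM/MACSEC_NEEDED_TAILROOM) should cover a
 * worst-case SecTAG (EtherType, TCI/AN, SL, PN plus the optional
 * 8-byte SCI: 16 bytes in all) of headroom and the 16-byte GCM-AES
 * ICV of tailroom, on top of whatever the lower device already asks
 * for. Offload providers can advertise smaller (possibly zero)
 * requirements through their macsec_ops.
 */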
2581
2582 static void macsec_inherit_tso_max(struct net_device *dev)
2583 {
2584 struct macsec_dev *macsec = macsec_priv(dev);
2585
2586 /* if macsec is offloaded, we need to follow the lower
2587 * device's capabilities. otherwise, we can ignore them.
2588 */
2589 if (macsec_is_offloaded(macsec))
2590 netif_inherit_tso_max(dev, macsec->real_dev);
2591 }
2592
2593 static int macsec_update_offload(struct net_device *dev,
2594 enum macsec_offload offload,
2595 struct netlink_ext_ack *extack)
2596 {
2597 enum macsec_offload prev_offload;
2598 const struct macsec_ops *ops;
2599 struct macsec_context ctx;
2600 struct macsec_dev *macsec;
2601 int ret = 0;
2602
2603 macsec = macsec_priv(dev);
2604
2605 /* Check if the offloading mode is supported by the underlying layers */
2606 if (offload != MACSEC_OFFLOAD_OFF &&
2607 !macsec_check_offload(offload, macsec))
2608 return -EOPNOTSUPP;
2609
2610 /* Check if the net device is busy. */
2611 if (netif_running(dev))
2612 return -EBUSY;
2613
2614 /* Check if the device already has rules configured: we do not support
2615 * rules migration.
2616 */
2617 if (macsec_is_configured(macsec))
2618 return -EBUSY;
2619
2620 prev_offload = macsec->offload;
2621
2622 ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2623 macsec, &ctx);
2624 if (!ops)
2625 return -EOPNOTSUPP;
2626
2627 ctx.secy = &macsec->secy;
2628 ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
2629 : macsec_offload(ops->mdo_add_secy, &ctx);
2630 if (ret)
2631 return ret;
2632
2633 /* Remove VLAN filters when disabling offload. */
2634 if (offload == MACSEC_OFFLOAD_OFF) {
2635 vlan_drop_rx_ctag_filter_info(dev);
2636 vlan_drop_rx_stag_filter_info(dev);
2637 }
2638 macsec->offload = offload;
2639 /* Add VLAN filters when enabling offload. */
2640 if (prev_offload == MACSEC_OFFLOAD_OFF) {
2641 ret = vlan_get_rx_ctag_filter_info(dev);
2642 if (ret) {
2643 NL_SET_ERR_MSG_FMT(extack,
2644 "adding ctag VLAN filters failed, err %d",
2645 ret);
2646 goto rollback_offload;
2647 }
2648 ret = vlan_get_rx_stag_filter_info(dev);
2649 if (ret) {
2650 NL_SET_ERR_MSG_FMT(extack,
2651 "adding stag VLAN filters failed, err %d",
2652 ret);
2653 vlan_drop_rx_ctag_filter_info(dev);
2654 goto rollback_offload;
2655 }
2656 }
2657
2658 macsec_set_head_tail_room(dev);
2659 macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops);
2660
2661 macsec_inherit_tso_max(dev);
2662
2663 netdev_update_features(dev);
2664
2665 return 0;
2666
2667 rollback_offload:
2668 macsec->offload = prev_offload;
2669 macsec_offload(ops->mdo_del_secy, &ctx);
2670
2671 return ret;
2672 }
2673
2674 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2675 {
2676 struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2677 struct nlattr **attrs = info->attrs;
2678 enum macsec_offload offload;
2679 struct macsec_dev *macsec;
2680 struct net_device *dev;
2681 int ret = 0;
2682
2683 if (!attrs[MACSEC_ATTR_IFINDEX])
2684 return -EINVAL;
2685
2686 if (!attrs[MACSEC_ATTR_OFFLOAD])
2687 return -EINVAL;
2688
2689 if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2690 attrs[MACSEC_ATTR_OFFLOAD],
2691 macsec_genl_offload_policy, NULL))
2692 return -EINVAL;
2693
2694 rtnl_lock();
2695
2696 dev = get_dev_from_nl(genl_info_net(info), attrs);
2697 if (IS_ERR(dev)) {
2698 ret = PTR_ERR(dev);
2699 goto out;
2700 }
2701 macsec = macsec_priv(dev);
2702
2703 if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
2704 ret = -EINVAL;
2705 goto out;
2706 }
2707
2708 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2709
2710 if (macsec->offload != offload)
2711 ret = macsec_update_offload(dev, offload, info->extack);
2712 out:
2713 rtnl_unlock();
2714 return ret;
2715 }
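
/* Illustrative userspace sketch: toggling offload on an existing link
 * maps to MACSEC_CMD_UPD_OFFLOAD, e.g. with iproute2
 *
 *   ip macsec offload macsec0 mac
 *   ip macsec offload macsec0 off
 *
 * As enforced in macsec_update_offload() above, this only succeeds
 * while the device is down and before any channel or SA has been
 * configured, since existing rules are not migrated between data
 * paths.
 */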
2716
2717 static void get_tx_sa_stats(struct net_device *dev, int an,
2718 struct macsec_tx_sa *tx_sa,
2719 struct macsec_tx_sa_stats *sum)
2720 {
2721 struct macsec_dev *macsec = macsec_priv(dev);
2722 int cpu;
2723
2724 /* If h/w offloading is available, propagate to the device */
2725 if (macsec_is_offloaded(macsec)) {
2726 const struct macsec_ops *ops;
2727 struct macsec_context ctx;
2728
2729 ops = macsec_get_ops(macsec, &ctx);
2730 if (ops) {
2731 ctx.sa.assoc_num = an;
2732 ctx.sa.tx_sa = tx_sa;
2733 ctx.stats.tx_sa_stats = sum;
2734 ctx.secy = &macsec_priv(dev)->secy;
2735 macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2736 }
2737 return;
2738 }
2739
2740 for_each_possible_cpu(cpu) {
2741 const struct macsec_tx_sa_stats *stats =
2742 per_cpu_ptr(tx_sa->stats, cpu);
2743
2744 sum->OutPktsProtected += stats->OutPktsProtected;
2745 sum->OutPktsEncrypted += stats->OutPktsEncrypted;
2746 }
2747 }
2748
2749 static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2750 {
2751 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2752 sum->OutPktsProtected) ||
2753 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2754 sum->OutPktsEncrypted))
2755 return -EMSGSIZE;
2756
2757 return 0;
2758 }
2759
2760 static void get_rx_sa_stats(struct net_device *dev,
2761 struct macsec_rx_sc *rx_sc, int an,
2762 struct macsec_rx_sa *rx_sa,
2763 struct macsec_rx_sa_stats *sum)
2764 {
2765 struct macsec_dev *macsec = macsec_priv(dev);
2766 int cpu;
2767
2768 /* If h/w offloading is available, propagate to the device */
2769 if (macsec_is_offloaded(macsec)) {
2770 const struct macsec_ops *ops;
2771 struct macsec_context ctx;
2772
2773 ops = macsec_get_ops(macsec, &ctx);
2774 if (ops) {
2775 ctx.sa.assoc_num = an;
2776 ctx.sa.rx_sa = rx_sa;
2777 ctx.stats.rx_sa_stats = sum;
2778 ctx.secy = &macsec_priv(dev)->secy;
2779 ctx.rx_sc = rx_sc;
2780 macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2781 }
2782 return;
2783 }
2784
2785 for_each_possible_cpu(cpu) {
2786 const struct macsec_rx_sa_stats *stats =
2787 per_cpu_ptr(rx_sa->stats, cpu);
2788
2789 sum->InPktsOK += stats->InPktsOK;
2790 sum->InPktsInvalid += stats->InPktsInvalid;
2791 sum->InPktsNotValid += stats->InPktsNotValid;
2792 sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2793 sum->InPktsUnusedSA += stats->InPktsUnusedSA;
2794 }
2795 }
2796
2797 static int copy_rx_sa_stats(struct sk_buff *skb,
2798 struct macsec_rx_sa_stats *sum)
2799 {
2800 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2801 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2802 sum->InPktsInvalid) ||
2803 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2804 sum->InPktsNotValid) ||
2805 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2806 sum->InPktsNotUsingSA) ||
2807 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2808 sum->InPktsUnusedSA))
2809 return -EMSGSIZE;
2810
2811 return 0;
2812 }
2813
2814 static void get_rx_sc_stats(struct net_device *dev,
2815 struct macsec_rx_sc *rx_sc,
2816 struct macsec_rx_sc_stats *sum)
2817 {
2818 struct macsec_dev *macsec = macsec_priv(dev);
2819 int cpu;
2820
2821 /* If h/w offloading is available, propagate to the device */
2822 if (macsec_is_offloaded(macsec)) {
2823 const struct macsec_ops *ops;
2824 struct macsec_context ctx;
2825
2826 ops = macsec_get_ops(macsec, &ctx);
2827 if (ops) {
2828 ctx.stats.rx_sc_stats = sum;
2829 ctx.secy = &macsec_priv(dev)->secy;
2830 ctx.rx_sc = rx_sc;
2831 macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2832 }
2833 return;
2834 }
2835
2836 for_each_possible_cpu(cpu) {
2837 const struct pcpu_rx_sc_stats *stats;
2838 struct macsec_rx_sc_stats tmp;
2839 unsigned int start;
2840
2841 stats = per_cpu_ptr(rx_sc->stats, cpu);
2842 do {
2843 start = u64_stats_fetch_begin(&stats->syncp);
2844 memcpy(&tmp, &stats->stats, sizeof(tmp));
2845 } while (u64_stats_fetch_retry(&stats->syncp, start));
2846
2847 sum->InOctetsValidated += tmp.InOctetsValidated;
2848 sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2849 sum->InPktsUnchecked += tmp.InPktsUnchecked;
2850 sum->InPktsDelayed += tmp.InPktsDelayed;
2851 sum->InPktsOK += tmp.InPktsOK;
2852 sum->InPktsInvalid += tmp.InPktsInvalid;
2853 sum->InPktsLate += tmp.InPktsLate;
2854 sum->InPktsNotValid += tmp.InPktsNotValid;
2855 sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
2856 sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
2857 }
2858 }
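
/* The fetch_begin/fetch_retry pair above is the standard seqcount
 * read side: the per-CPU writer bumps the sequence around each
 * update, and the reader re-copies its snapshot whenever the sequence
 * changed underneath it, so every per-CPU contribution is internally
 * consistent without taking a lock on the stats hot path.
 */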
2859
2860 static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2861 {
2862 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2863 sum->InOctetsValidated,
2864 MACSEC_RXSC_STATS_ATTR_PAD) ||
2865 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2866 sum->InOctetsDecrypted,
2867 MACSEC_RXSC_STATS_ATTR_PAD) ||
2868 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2869 sum->InPktsUnchecked,
2870 MACSEC_RXSC_STATS_ATTR_PAD) ||
2871 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2872 sum->InPktsDelayed,
2873 MACSEC_RXSC_STATS_ATTR_PAD) ||
2874 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2875 sum->InPktsOK,
2876 MACSEC_RXSC_STATS_ATTR_PAD) ||
2877 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2878 sum->InPktsInvalid,
2879 MACSEC_RXSC_STATS_ATTR_PAD) ||
2880 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2881 sum->InPktsLate,
2882 MACSEC_RXSC_STATS_ATTR_PAD) ||
2883 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2884 sum->InPktsNotValid,
2885 MACSEC_RXSC_STATS_ATTR_PAD) ||
2886 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2887 sum->InPktsNotUsingSA,
2888 MACSEC_RXSC_STATS_ATTR_PAD) ||
2889 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2890 sum->InPktsUnusedSA,
2891 MACSEC_RXSC_STATS_ATTR_PAD))
2892 return -EMSGSIZE;
2893
2894 return 0;
2895 }
2896
2897 static void get_tx_sc_stats(struct net_device *dev,
2898 struct macsec_tx_sc_stats *sum)
2899 {
2900 struct macsec_dev *macsec = macsec_priv(dev);
2901 int cpu;
2902
2903 /* If h/w offloading is available, propagate to the device */
2904 if (macsec_is_offloaded(macsec)) {
2905 const struct macsec_ops *ops;
2906 struct macsec_context ctx;
2907
2908 ops = macsec_get_ops(macsec, &ctx);
2909 if (ops) {
2910 ctx.stats.tx_sc_stats = sum;
2911 ctx.secy = &macsec_priv(dev)->secy;
2912 macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2913 }
2914 return;
2915 }
2916
2917 for_each_possible_cpu(cpu) {
2918 const struct pcpu_tx_sc_stats *stats;
2919 struct macsec_tx_sc_stats tmp;
2920 unsigned int start;
2921
2922 stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
2923 do {
2924 start = u64_stats_fetch_begin(&stats->syncp);
2925 memcpy(&tmp, &stats->stats, sizeof(tmp));
2926 } while (u64_stats_fetch_retry(&stats->syncp, start));
2927
2928 sum->OutPktsProtected += tmp.OutPktsProtected;
2929 sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
2930 sum->OutOctetsProtected += tmp.OutOctetsProtected;
2931 sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2932 }
2933 }
2934
2935 static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2936 {
2937 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2938 sum->OutPktsProtected,
2939 MACSEC_TXSC_STATS_ATTR_PAD) ||
2940 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2941 sum->OutPktsEncrypted,
2942 MACSEC_TXSC_STATS_ATTR_PAD) ||
2943 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2944 sum->OutOctetsProtected,
2945 MACSEC_TXSC_STATS_ATTR_PAD) ||
2946 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2947 sum->OutOctetsEncrypted,
2948 MACSEC_TXSC_STATS_ATTR_PAD))
2949 return -EMSGSIZE;
2950
2951 return 0;
2952 }
2953
2954 static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
2955 {
2956 struct macsec_dev *macsec = macsec_priv(dev);
2957 int cpu;
2958
2959 /* If h/w offloading is available, propagate to the device */
2960 if (macsec_is_offloaded(macsec)) {
2961 const struct macsec_ops *ops;
2962 struct macsec_context ctx;
2963
2964 ops = macsec_get_ops(macsec, &ctx);
2965 if (ops) {
2966 ctx.stats.dev_stats = sum;
2967 ctx.secy = &macsec_priv(dev)->secy;
2968 macsec_offload(ops->mdo_get_dev_stats, &ctx);
2969 }
2970 return;
2971 }
2972
2973 for_each_possible_cpu(cpu) {
2974 const struct pcpu_secy_stats *stats;
2975 struct macsec_dev_stats tmp;
2976 unsigned int start;
2977
2978 stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
2979 do {
2980 start = u64_stats_fetch_begin(&stats->syncp);
2981 memcpy(&tmp, &stats->stats, sizeof(tmp));
2982 } while (u64_stats_fetch_retry(&stats->syncp, start));
2983
2984 sum->OutPktsUntagged += tmp.OutPktsUntagged;
2985 sum->InPktsUntagged += tmp.InPktsUntagged;
2986 sum->OutPktsTooLong += tmp.OutPktsTooLong;
2987 sum->InPktsNoTag += tmp.InPktsNoTag;
2988 sum->InPktsBadTag += tmp.InPktsBadTag;
2989 sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2990 sum->InPktsNoSCI += tmp.InPktsNoSCI;
2991 sum->InPktsOverrun += tmp.InPktsOverrun;
2992 }
2993 }
2994
2995 static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
2996 {
2997 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2998 sum->OutPktsUntagged,
2999 MACSEC_SECY_STATS_ATTR_PAD) ||
3000 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
3001 sum->InPktsUntagged,
3002 MACSEC_SECY_STATS_ATTR_PAD) ||
3003 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
3004 sum->OutPktsTooLong,
3005 MACSEC_SECY_STATS_ATTR_PAD) ||
3006 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
3007 sum->InPktsNoTag,
3008 MACSEC_SECY_STATS_ATTR_PAD) ||
3009 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
3010 sum->InPktsBadTag,
3011 MACSEC_SECY_STATS_ATTR_PAD) ||
3012 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
3013 sum->InPktsUnknownSCI,
3014 MACSEC_SECY_STATS_ATTR_PAD) ||
3015 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
3016 sum->InPktsNoSCI,
3017 MACSEC_SECY_STATS_ATTR_PAD) ||
3018 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
3019 sum->InPktsOverrun,
3020 MACSEC_SECY_STATS_ATTR_PAD))
3021 return -EMSGSIZE;
3022
3023 return 0;
3024 }
3025
3026 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
3027 {
3028 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3029 struct nlattr *secy_nest = nla_nest_start_noflag(skb,
3030 MACSEC_ATTR_SECY);
3031 u64 csid;
3032
3033 if (!secy_nest)
3034 return 1;
3035
3036 switch (secy->key_len) {
3037 case MACSEC_GCM_AES_128_SAK_LEN:
3038 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
3039 break;
3040 case MACSEC_GCM_AES_256_SAK_LEN:
3041 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
3042 break;
3043 default:
3044 goto cancel;
3045 }
3046
3047 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
3048 MACSEC_SECY_ATTR_PAD) ||
3049 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
3050 csid, MACSEC_SECY_ATTR_PAD) ||
3051 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
3052 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
3053 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
3054 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
3055 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
3056 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
3057 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3058 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3059 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3060 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3061 goto cancel;
3062
3063 if (secy->replay_protect) {
3064 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3065 goto cancel;
3066 }
3067
3068 nla_nest_end(skb, secy_nest);
3069 return 0;
3070
3071 cancel:
3072 nla_nest_cancel(skb, secy_nest);
3073 return 1;
3074 }
3075
3076 static noinline_for_stack int
3077 dump_secy(struct macsec_secy *secy, struct net_device *dev,
3078 struct sk_buff *skb, struct netlink_callback *cb)
3079 {
3080 struct macsec_tx_sc_stats tx_sc_stats = {0, };
3081 struct macsec_tx_sa_stats tx_sa_stats = {0, };
3082 struct macsec_rx_sc_stats rx_sc_stats = {0, };
3083 struct macsec_rx_sa_stats rx_sa_stats = {0, };
3084 struct macsec_dev *macsec = netdev_priv(dev);
3085 struct macsec_dev_stats dev_stats = {0, };
3086 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3087 struct nlattr *txsa_list, *rxsc_list;
3088 struct macsec_rx_sc *rx_sc;
3089 struct nlattr *attr;
3090 void *hdr;
3091 int i, j;
3092
3093 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3094 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3095 if (!hdr)
3096 return -EMSGSIZE;
3097
3098 genl_dump_check_consistent(cb, hdr);
3099
3100 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3101 goto nla_put_failure;
3102
3103 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3104 if (!attr)
3105 goto nla_put_failure;
3106 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3107 goto nla_put_failure;
3108 nla_nest_end(skb, attr);
3109
3110 if (nla_put_secy(secy, skb))
3111 goto nla_put_failure;
3112
3113 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
3114 if (!attr)
3115 goto nla_put_failure;
3116
3117 get_tx_sc_stats(dev, &tx_sc_stats);
3118 if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
3119 nla_nest_cancel(skb, attr);
3120 goto nla_put_failure;
3121 }
3122 nla_nest_end(skb, attr);
3123
3124 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
3125 if (!attr)
3126 goto nla_put_failure;
3127 get_secy_stats(dev, &dev_stats);
3128 if (copy_secy_stats(skb, &dev_stats)) {
3129 nla_nest_cancel(skb, attr);
3130 goto nla_put_failure;
3131 }
3132 nla_nest_end(skb, attr);
3133
3134 txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
3135 if (!txsa_list)
3136 goto nla_put_failure;
3137 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3138 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3139 struct nlattr *txsa_nest;
3140 u64 pn;
3141 int pn_len;
3142
3143 if (!tx_sa)
3144 continue;
3145
3146 txsa_nest = nla_nest_start_noflag(skb, j++);
3147 if (!txsa_nest) {
3148 nla_nest_cancel(skb, txsa_list);
3149 goto nla_put_failure;
3150 }
3151
3152 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3153 if (!attr) {
3154 nla_nest_cancel(skb, txsa_nest);
3155 nla_nest_cancel(skb, txsa_list);
3156 goto nla_put_failure;
3157 }
3158 memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3159 get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3160 if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3161 nla_nest_cancel(skb, attr);
3162 nla_nest_cancel(skb, txsa_nest);
3163 nla_nest_cancel(skb, txsa_list);
3164 goto nla_put_failure;
3165 }
3166 nla_nest_end(skb, attr);
3167
3168 if (secy->xpn) {
3169 pn = tx_sa->next_pn;
3170 pn_len = MACSEC_XPN_PN_LEN;
3171 } else {
3172 pn = tx_sa->next_pn_halves.lower;
3173 pn_len = MACSEC_DEFAULT_PN_LEN;
3174 }
3175
3176 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3177 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3178 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
3179 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
3180 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3181 nla_nest_cancel(skb, txsa_nest);
3182 nla_nest_cancel(skb, txsa_list);
3183 goto nla_put_failure;
3184 }
3185
3186 nla_nest_end(skb, txsa_nest);
3187 }
3188 nla_nest_end(skb, txsa_list);
3189
3190 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
3191 if (!rxsc_list)
3192 goto nla_put_failure;
3193
3194 j = 1;
3195 for_each_rxsc_rtnl(secy, rx_sc) {
3196 int k;
3197 struct nlattr *rxsa_list;
3198 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
3199
3200 if (!rxsc_nest) {
3201 nla_nest_cancel(skb, rxsc_list);
3202 goto nla_put_failure;
3203 }
3204
3205 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
3206 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3207 MACSEC_RXSC_ATTR_PAD)) {
3208 nla_nest_cancel(skb, rxsc_nest);
3209 nla_nest_cancel(skb, rxsc_list);
3210 goto nla_put_failure;
3211 }
3212
3213 attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
3214 if (!attr) {
3215 nla_nest_cancel(skb, rxsc_nest);
3216 nla_nest_cancel(skb, rxsc_list);
3217 goto nla_put_failure;
3218 }
3219 memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3220 get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3221 if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
3222 nla_nest_cancel(skb, attr);
3223 nla_nest_cancel(skb, rxsc_nest);
3224 nla_nest_cancel(skb, rxsc_list);
3225 goto nla_put_failure;
3226 }
3227 nla_nest_end(skb, attr);
3228
3229 rxsa_list = nla_nest_start_noflag(skb,
3230 MACSEC_RXSC_ATTR_SA_LIST);
3231 if (!rxsa_list) {
3232 nla_nest_cancel(skb, rxsc_nest);
3233 nla_nest_cancel(skb, rxsc_list);
3234 goto nla_put_failure;
3235 }
3236
3237 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3238 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3239 struct nlattr *rxsa_nest;
3240 u64 pn;
3241 int pn_len;
3242
3243 if (!rx_sa)
3244 continue;
3245
3246 rxsa_nest = nla_nest_start_noflag(skb, k++);
3247 if (!rxsa_nest) {
3248 nla_nest_cancel(skb, rxsa_list);
3249 nla_nest_cancel(skb, rxsc_nest);
3250 nla_nest_cancel(skb, rxsc_list);
3251 goto nla_put_failure;
3252 }
3253
3254 attr = nla_nest_start_noflag(skb,
3255 MACSEC_SA_ATTR_STATS);
3256 if (!attr) {
3257 nla_nest_cancel(skb, rxsa_list);
3258 nla_nest_cancel(skb, rxsc_nest);
3259 nla_nest_cancel(skb, rxsc_list);
3260 goto nla_put_failure;
3261 }
3262 memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3263 get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3264 if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
3265 nla_nest_cancel(skb, attr);
3266 nla_nest_cancel(skb, rxsa_list);
3267 nla_nest_cancel(skb, rxsc_nest);
3268 nla_nest_cancel(skb, rxsc_list);
3269 goto nla_put_failure;
3270 }
3271 nla_nest_end(skb, attr);
3272
3273 if (secy->xpn) {
3274 pn = rx_sa->next_pn;
3275 pn_len = MACSEC_XPN_PN_LEN;
3276 } else {
3277 pn = rx_sa->next_pn_halves.lower;
3278 pn_len = MACSEC_DEFAULT_PN_LEN;
3279 }
3280
3281 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3282 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3283 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
3284 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
3285 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3286 nla_nest_cancel(skb, rxsa_nest);
3287 nla_nest_cancel(skb, rxsc_nest);
3288 nla_nest_cancel(skb, rxsc_list);
3289 goto nla_put_failure;
3290 }
3291 nla_nest_end(skb, rxsa_nest);
3292 }
3293
3294 nla_nest_end(skb, rxsa_list);
3295 nla_nest_end(skb, rxsc_nest);
3296 }
3297
3298 nla_nest_end(skb, rxsc_list);
3299
3300 genlmsg_end(skb, hdr);
3301
3302 return 0;
3303
3304 nla_put_failure:
3305 genlmsg_cancel(skb, hdr);
3306 return -EMSGSIZE;
3307 }
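
/* Illustrative rendering sketch: the nested message built above is
 * what "ip macsec show" formats, along the lines of (example values,
 * exact layout depends on the iproute2 version):
 *
 *   11: macsec0: protect on validate strict sc off sa off encrypt on
 *       cipher suite: GCM-AES-128, using ICV length 16
 *       TXSC: fe5400a1b2c30001 on SA 0
 *           0: PN 42, state on, key 00...
 *       RXSC: c619528fe6a0000b, state on
 *           0: PN 17, state on, key 01...
 */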
3308
3309 static int macsec_generation = 1; /* protected by RTNL */
3310
3311 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3312 {
3313 struct net *net = sock_net(skb->sk);
3314 struct net_device *dev;
3315 int dev_idx, d;
3316
3317 dev_idx = cb->args[0];
3318
3319 d = 0;
3320 rtnl_lock();
3321
3322 cb->seq = macsec_generation;
3323
3324 for_each_netdev(net, dev) {
3325 struct macsec_secy *secy;
3326
3327 if (d < dev_idx)
3328 goto next;
3329
3330 if (!netif_is_macsec(dev))
3331 goto next;
3332
3333 secy = &macsec_priv(dev)->secy;
3334 if (dump_secy(secy, dev, skb, cb) < 0)
3335 goto done;
3336 next:
3337 d++;
3338 }
3339
3340 done:
3341 rtnl_unlock();
3342 cb->args[0] = d;
3343 return skb->len;
3344 }
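
/* Dump resumption: cb->args[0] records how many net devices were
 * walked on the previous pass so a multi-part dump can pick up where
 * it stopped, and cb->seq is matched against macsec_generation
 * (bumped under RTNL on configuration changes) so that
 * genl_dump_check_consistent() can flag an interrupted, inconsistent
 * dump to userspace.
 */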
3345
3346 static const struct genl_small_ops macsec_genl_ops[] = {
3347 {
3348 .cmd = MACSEC_CMD_GET_TXSC,
3349 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3350 .dumpit = macsec_dump_txsc,
3351 },
3352 {
3353 .cmd = MACSEC_CMD_ADD_RXSC,
3354 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3355 .doit = macsec_add_rxsc,
3356 .flags = GENL_ADMIN_PERM,
3357 },
3358 {
3359 .cmd = MACSEC_CMD_DEL_RXSC,
3360 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3361 .doit = macsec_del_rxsc,
3362 .flags = GENL_ADMIN_PERM,
3363 },
3364 {
3365 .cmd = MACSEC_CMD_UPD_RXSC,
3366 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3367 .doit = macsec_upd_rxsc,
3368 .flags = GENL_ADMIN_PERM,
3369 },
3370 {
3371 .cmd = MACSEC_CMD_ADD_TXSA,
3372 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3373 .doit = macsec_add_txsa,
3374 .flags = GENL_ADMIN_PERM,
3375 },
3376 {
3377 .cmd = MACSEC_CMD_DEL_TXSA,
3378 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3379 .doit = macsec_del_txsa,
3380 .flags = GENL_ADMIN_PERM,
3381 },
3382 {
3383 .cmd = MACSEC_CMD_UPD_TXSA,
3384 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3385 .doit = macsec_upd_txsa,
3386 .flags = GENL_ADMIN_PERM,
3387 },
3388 {
3389 .cmd = MACSEC_CMD_ADD_RXSA,
3390 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3391 .doit = macsec_add_rxsa,
3392 .flags = GENL_ADMIN_PERM,
3393 },
3394 {
3395 .cmd = MACSEC_CMD_DEL_RXSA,
3396 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3397 .doit = macsec_del_rxsa,
3398 .flags = GENL_ADMIN_PERM,
3399 },
3400 {
3401 .cmd = MACSEC_CMD_UPD_RXSA,
3402 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3403 .doit = macsec_upd_rxsa,
3404 .flags = GENL_ADMIN_PERM,
3405 },
3406 {
3407 .cmd = MACSEC_CMD_UPD_OFFLOAD,
3408 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3409 .doit = macsec_upd_offload,
3410 .flags = GENL_ADMIN_PERM,
3411 },
3412 };
3413
3414 static struct genl_family macsec_fam __ro_after_init = {
3415 .name = MACSEC_GENL_NAME,
3416 .hdrsize = 0,
3417 .version = MACSEC_GENL_VERSION,
3418 .maxattr = MACSEC_ATTR_MAX,
3419 .policy = macsec_genl_policy,
3420 .netnsok = true,
3421 .module = THIS_MODULE,
3422 .small_ops = macsec_genl_ops,
3423 .n_small_ops = ARRAY_SIZE(macsec_genl_ops),
3424 .resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1,
3425 };
3426
3427 static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb,
3428 struct net_device *dev)
3429 {
3430 struct macsec_dev *macsec = macsec_priv(dev);
3431 const struct macsec_ops *ops;
3432 struct phy_device *phydev;
3433 struct macsec_context ctx;
3434 int skb_final_len;
3435 int err;
3436
3437 ops = macsec_get_ops(macsec, &ctx);
3438 skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom +
3439 ops->needed_tailroom;
3440 if (unlikely(skb_final_len > macsec->real_dev->mtu)) {
3441 err = -EINVAL;
3442 goto cleanup;
3443 }
3444
3445 phydev = macsec->real_dev->phydev;
3446
3447 err = skb_ensure_writable_head_tail(skb, dev);
3448 if (unlikely(err < 0))
3449 goto cleanup;
3450
3451 err = ops->mdo_insert_tx_tag(phydev, skb);
3452 if (unlikely(err))
3453 goto cleanup;
3454
3455 return skb;
3456 cleanup:
3457 kfree_skb(skb);
3458 return ERR_PTR(err);
3459 }
3460
3461 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3462 struct net_device *dev)
3463 {
3464 struct macsec_dev *macsec = netdev_priv(dev);
3465 struct macsec_secy *secy = &macsec->secy;
3466 struct pcpu_secy_stats *secy_stats;
3467 int ret, len;
3468
3469 if (macsec_is_offloaded(netdev_priv(dev))) {
3470 struct metadata_dst *md_dst = secy->tx_sc.md_dst;
3471
3472 skb_dst_drop(skb);
3473 dst_hold(&md_dst->dst);
3474 skb_dst_set(skb, &md_dst->dst);
3475
3476 if (macsec->insert_tx_tag) {
3477 skb = macsec_insert_tx_tag(skb, dev);
3478 if (IS_ERR(skb)) {
3479 DEV_STATS_INC(dev, tx_dropped);
3480 return NETDEV_TX_OK;
3481 }
3482 }
3483
3484 skb->dev = macsec->real_dev;
3485 return dev_queue_xmit(skb);
3486 }
3487
3488 /* 10.5 */
3489 if (!secy->protect_frames) {
3490 secy_stats = this_cpu_ptr(macsec->stats);
3491 u64_stats_update_begin(&secy_stats->syncp);
3492 secy_stats->stats.OutPktsUntagged++;
3493 u64_stats_update_end(&secy_stats->syncp);
3494 skb->dev = macsec->real_dev;
3495 len = skb->len;
3496 ret = dev_queue_xmit(skb);
3497 count_tx(dev, ret, len);
3498 return ret;
3499 }
3500
3501 if (!secy->operational) {
3502 kfree_skb(skb);
3503 DEV_STATS_INC(dev, tx_dropped);
3504 return NETDEV_TX_OK;
3505 }
3506
3507 len = skb->len;
3508 skb = macsec_encrypt(skb, dev);
3509 if (IS_ERR(skb)) {
3510 if (PTR_ERR(skb) != -EINPROGRESS)
3511 DEV_STATS_INC(dev, tx_dropped);
3512 return NETDEV_TX_OK;
3513 }
3514
3515 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3516
3517 macsec_encrypt_finish(skb, dev);
3518 ret = dev_queue_xmit(skb);
3519 count_tx(dev, ret, len);
3520 return ret;
3521 }
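
/* Transmit summary: three paths leave this function. Offloaded skbs
 * get the SecY's metadata_dst (letting the provider match the flow)
 * and go straight to the lower device; with protect_frames disabled,
 * frames bypass crypto entirely and are only accounted as untagged;
 * otherwise macsec_encrypt() runs the AEAD, possibly asynchronously,
 * in which case -EINPROGRESS means the crypto completion callback
 * will finish transmission and accounting later.
 */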
3522
3523 #define MACSEC_FEATURES \
3524 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
3525 NETIF_F_HW_VLAN_STAG_FILTER | NETIF_F_HW_VLAN_CTAG_FILTER)
3526
3527 #define MACSEC_OFFLOAD_FEATURES \
3528 (MACSEC_FEATURES | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES | \
3529 NETIF_F_LRO | NETIF_F_RXHASH | NETIF_F_CSUM_MASK | NETIF_F_RXCSUM)
3530
3531 static int macsec_dev_init(struct net_device *dev)
3532 {
3533 struct macsec_dev *macsec = macsec_priv(dev);
3534 struct net_device *real_dev = macsec->real_dev;
3535 int err;
3536
3537 err = gro_cells_init(&macsec->gro_cells, dev);
3538 if (err)
3539 return err;
3540
3541 macsec_inherit_tso_max(dev);
3542
3543 dev->hw_features = real_dev->hw_features & MACSEC_OFFLOAD_FEATURES;
3544 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
3545
3546 dev->features = real_dev->features & MACSEC_OFFLOAD_FEATURES;
3547 dev->features |= NETIF_F_GSO_SOFTWARE;
3548 dev->lltx = true;
3549 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
3550
3551 macsec_set_head_tail_room(dev);
3552
3553 if (is_zero_ether_addr(dev->dev_addr))
3554 eth_hw_addr_inherit(dev, real_dev);
3555 if (is_zero_ether_addr(dev->broadcast))
3556 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3557
3558 /* Get macsec's reference to real_dev */
3559 netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL);
3560
3561 return 0;
3562 }
3563
3564 static void macsec_dev_uninit(struct net_device *dev)
3565 {
3566 struct macsec_dev *macsec = macsec_priv(dev);
3567
3568 gro_cells_destroy(&macsec->gro_cells);
3569 }
3570
3571 static netdev_features_t macsec_fix_features(struct net_device *dev,
3572 netdev_features_t features)
3573 {
3574 struct macsec_dev *macsec = macsec_priv(dev);
3575 struct net_device *real_dev = macsec->real_dev;
3576 netdev_features_t mask;
3577
3578 mask = macsec_is_offloaded(macsec) ? MACSEC_OFFLOAD_FEATURES
3579 : MACSEC_FEATURES;
3580
3581 features &= (real_dev->features & mask) |
3582 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3583
3584 return features;
3585 }
3586
3587 static int macsec_dev_open(struct net_device *dev)
3588 {
3589 struct macsec_dev *macsec = macsec_priv(dev);
3590 struct net_device *real_dev = macsec->real_dev;
3591 int err;
3592
3593 err = dev_uc_add(real_dev, dev->dev_addr);
3594 if (err < 0)
3595 return err;
3596
3597 if (dev->flags & IFF_ALLMULTI) {
3598 err = dev_set_allmulti(real_dev, 1);
3599 if (err < 0)
3600 goto del_unicast;
3601 }
3602
3603 if (dev->flags & IFF_PROMISC) {
3604 err = dev_set_promiscuity(real_dev, 1);
3605 if (err < 0)
3606 goto clear_allmulti;
3607 }
3608
3609 /* If h/w offloading is available, propagate to the device */
3610 if (macsec_is_offloaded(macsec)) {
3611 const struct macsec_ops *ops;
3612 struct macsec_context ctx;
3613
3614 ops = macsec_get_ops(netdev_priv(dev), &ctx);
3615 if (!ops) {
3616 err = -EOPNOTSUPP;
3617 goto clear_allmulti;
3618 }
3619
3620 ctx.secy = &macsec->secy;
3621 err = macsec_offload(ops->mdo_dev_open, &ctx);
3622 if (err)
3623 goto clear_allmulti;
3624 }
3625
3626 if (netif_carrier_ok(real_dev))
3627 netif_carrier_on(dev);
3628
3629 return 0;
3630 clear_allmulti:
3631 if (dev->flags & IFF_ALLMULTI)
3632 dev_set_allmulti(real_dev, -1);
3633 del_unicast:
3634 dev_uc_del(real_dev, dev->dev_addr);
3635 netif_carrier_off(dev);
3636 return err;
3637 }
3638
3639 static int macsec_dev_stop(struct net_device *dev)
3640 {
3641 struct macsec_dev *macsec = macsec_priv(dev);
3642 struct net_device *real_dev = macsec->real_dev;
3643
3644 netif_carrier_off(dev);
3645
3646 /* If h/w offloading is available, propagate to the device */
3647 if (macsec_is_offloaded(macsec)) {
3648 const struct macsec_ops *ops;
3649 struct macsec_context ctx;
3650
3651 ops = macsec_get_ops(macsec, &ctx);
3652 if (ops) {
3653 ctx.secy = &macsec->secy;
3654 macsec_offload(ops->mdo_dev_stop, &ctx);
3655 }
3656 }
3657
3658 dev_mc_unsync(real_dev, dev);
3659 dev_uc_unsync(real_dev, dev);
3660
3661 if (dev->flags & IFF_ALLMULTI)
3662 dev_set_allmulti(real_dev, -1);
3663
3664 if (dev->flags & IFF_PROMISC)
3665 dev_set_promiscuity(real_dev, -1);
3666
3667 dev_uc_del(real_dev, dev->dev_addr);
3668
3669 return 0;
3670 }
3671
3672 static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
3673 {
3674 struct net_device *real_dev = macsec_priv(dev)->real_dev;
3675
3676 if (!(dev->flags & IFF_UP))
3677 return;
3678
3679 if (change & IFF_ALLMULTI)
3680 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
3681
3682 if (change & IFF_PROMISC)
3683 dev_set_promiscuity(real_dev,
3684 dev->flags & IFF_PROMISC ? 1 : -1);
3685 }
3686
3687 static void macsec_dev_set_rx_mode(struct net_device *dev)
3688 {
3689 struct net_device *real_dev = macsec_priv(dev)->real_dev;
3690
3691 dev_mc_sync(real_dev, dev);
3692 dev_uc_sync(real_dev, dev);
3693 }
3694
3695 static int macsec_set_mac_address(struct net_device *dev, void *p)
3696 {
3697 struct macsec_dev *macsec = macsec_priv(dev);
3698 struct net_device *real_dev = macsec->real_dev;
3699 struct sockaddr *addr = p;
3700 u8 old_addr[ETH_ALEN];
3701 int err;
3702
3703 if (!is_valid_ether_addr(addr->sa_data))
3704 return -EADDRNOTAVAIL;
3705
3706 if (dev->flags & IFF_UP) {
3707 err = dev_uc_add(real_dev, addr->sa_data);
3708 if (err < 0)
3709 return err;
3710 }
3711
3712 ether_addr_copy(old_addr, dev->dev_addr);
3713 eth_hw_addr_set(dev, addr->sa_data);
3714
3715 /* If h/w offloading is available, propagate to the device */
3716 if (macsec_is_offloaded(macsec)) {
3717 const struct macsec_ops *ops;
3718 struct macsec_context ctx;
3719
3720 ops = macsec_get_ops(macsec, &ctx);
3721 if (!ops) {
3722 err = -EOPNOTSUPP;
3723 goto restore_old_addr;
3724 }
3725
3726 ctx.secy = &macsec->secy;
3727 err = macsec_offload(ops->mdo_upd_secy, &ctx);
3728 if (err)
3729 goto restore_old_addr;
3730 }
3731
3732 if (dev->flags & IFF_UP)
3733 dev_uc_del(real_dev, old_addr);
3734
3735 return 0;
3736
3737 restore_old_addr:
3738 if (dev->flags & IFF_UP)
3739 dev_uc_del(real_dev, addr->sa_data);
3740
3741 eth_hw_addr_set(dev, old_addr);
3742
3743 return err;
3744 }
3745
3746 static int macsec_vlan_rx_add_vid(struct net_device *dev,
3747 __be16 proto, u16 vid)
3748 {
3749 struct macsec_dev *macsec = netdev_priv(dev);
3750
3751 if (!macsec_is_offloaded(macsec))
3752 return 0;
3753
3754 return vlan_vid_add(macsec->real_dev, proto, vid);
3755 }
3756
3757 static int macsec_vlan_rx_kill_vid(struct net_device *dev,
3758 __be16 proto, u16 vid)
3759 {
3760 struct macsec_dev *macsec = netdev_priv(dev);
3761
3762 if (!macsec_is_offloaded(macsec))
3763 return 0;
3764
3765 vlan_vid_del(macsec->real_dev, proto, vid);
3766 return 0;
3767 }
3768
3769 static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3770 {
3771 struct macsec_dev *macsec = macsec_priv(dev);
3772 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
3773
3774 if (macsec->real_dev->mtu - extra < new_mtu)
3775 return -ERANGE;
3776
3777 WRITE_ONCE(dev->mtu, new_mtu);
3778
3779 return 0;
3780 }
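
/* Worked example: macsec_extra_len(true) is the conservative,
 * SCI-present SecTAG expansion (6-byte tag + 8-byte SCI + 2-byte
 * EtherType = 16 bytes), so with the standard 16-byte ICV the total
 * overhead is 32 bytes and a lower device with a 1500-byte MTU
 * accepts new_mtu values up to 1468.
 */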
3781
3782 static void macsec_get_stats64(struct net_device *dev,
3783 struct rtnl_link_stats64 *s)
3784 {
3785 if (!dev->tstats)
3786 return;
3787
3788 dev_fetch_sw_netstats(s, dev->tstats);
3789
3790 s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
3791 s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
3792 s->rx_errors = DEV_STATS_READ(dev, rx_errors);
3793 }
3794
3795 static int macsec_get_iflink(const struct net_device *dev)
3796 {
3797 return READ_ONCE(macsec_priv(dev)->real_dev->ifindex);
3798 }
3799
static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_vlan_rx_add_vid	= macsec_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= macsec_vlan_rx_kill_vid,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static int validate_cipher_suite(const struct nlattr *attr,
				 struct netlink_ext_ack *extack);
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
	[IFLA_MACSEC_ICV_LEN] = NLA_POLICY_RANGE(NLA_U8, MACSEC_MIN_ICV_LEN, MACSEC_STD_ICV_LEN),
	[IFLA_MACSEC_CIPHER_SUITE] = NLA_POLICY_VALIDATE_FN(NLA_U64, validate_cipher_suite),
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1),
	[IFLA_MACSEC_ENCRYPT] = NLA_POLICY_MAX(NLA_U8, 1),
	[IFLA_MACSEC_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1),
	[IFLA_MACSEC_INC_SCI] = NLA_POLICY_MAX(NLA_U8, 1),
	[IFLA_MACSEC_ES] = NLA_POLICY_MAX(NLA_U8, 1),
	[IFLA_MACSEC_SCB] = NLA_POLICY_MAX(NLA_U8, 1),
	[IFLA_MACSEC_REPLAY_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1),
	[IFLA_MACSEC_VALIDATION] = NLA_POLICY_MAX(NLA_U8, MACSEC_VALIDATE_MAX),
	[IFLA_MACSEC_OFFLOAD] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX),
};

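/* With this policy, malformed attributes are rejected during netlink parsing,
 * before any of the newlink/changelink code below runs. A sketch, assuming
 * the generic rtnetlink parse path: an IFLA_MACSEC_ENCODING_SA of 4 exceeds
 * NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1) and fails with -ERANGE, and an
 * IFLA_MACSEC_ICV_LEN outside [MACSEC_MIN_ICV_LEN, MACSEC_STD_ICV_LEN] never
 * reaches macsec_validate_attr()'s dummy-tfm probe.
 */
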
static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	dst_release(&macsec->secy.tx_sc.md_dst->dst);
	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	/* Get rid of the macsec's reference to real_dev */
	netdev_put(macsec->real_dev, &macsec->dev_tracker);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE | IFF_UNICAST_FLT;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = true;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (data[IFLA_MACSEC_WINDOW]) {
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

		/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
		 * for XPN cipher suites
		 */
		if (secy->xpn &&
		    secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
			return -EINVAL;
	}

	return 0;
}

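/* The cipher-suite switch above is what iproute2's "cipher" keyword maps to,
 * e.g. (hypothetical device names):
 *
 *	ip link add link eth0 macsec0 type macsec cipher gcm-aes-256
 *
 * selects MACSEC_CIPHER_ID_GCM_AES_256 (32-byte SAK, no XPN), while the
 * GCM-AES-128 ID shares a case with MACSEC_DEFAULT_CIPHER_ID for backward
 * compatibility. Since macsec_changelink() below rejects
 * IFLA_MACSEC_CIPHER_SUITE after creation, this branch is only reachable
 * through the newlink path.
 */
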
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	bool macsec_offload_state_change = false;
	enum macsec_offload offload;
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case the offload
	 * propagation fails, to revert macsec_changelink_common.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));

	ret = macsec_changelink_common(dev, data);
	if (ret)
		goto cleanup;

	if (data[IFLA_MACSEC_OFFLOAD]) {
		offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
		if (macsec->offload != offload) {
			macsec_offload_state_change = true;
			ret = macsec_update_offload(dev, offload, extack);
			if (ret)
				goto cleanup;
		}
	}

	/* If h/w offloading is available, propagate to the device */
	if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

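/* A worked example of the SCI derivation above (illustrative MAC address):
 * the 64-bit SCI is the 6-byte MAC followed by the 2-byte port, both in
 * network byte order. With dev_addr 52:54:00:12:34:56 and the default port
 * MACSEC_PORT_ES (1), dev_to_sci() yields the on-wire SCI
 * 52:54:00:12:34:56:00:01. This is also the value that sci_exists() compares
 * against to keep SCIs unique per underlying device.
 */
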
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats)
		return -ENOMEM;

	secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
	if (!secy->tx_sc.md_dst)
		/* macsec and secy percpu stats will be freed when unregistering
		 * net_device in macsec_free_netdev()
		 */
		return -ENOMEM;

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;
	secy->xpn = DEFAULT_XPN;

	secy->sci = sci;
	secy->tx_sc.md_dst->u.macsec_info.sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static struct lock_class_key macsec_netdev_addr_lock_key;

static int macsec_newlink(struct net_device *dev,
			  struct rtnl_newlink_params *params,
			  struct netlink_ext_ack *extack)
{
	struct net *link_net = rtnl_newlink_link_net(params);
	struct macsec_dev *macsec = macsec_priv(dev);
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;
	rx_handler_func_t *rx_handler;
	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
	struct net_device *real_dev;
	int err, mtu;
	sci_t sci;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_ETHER)
		return -EINVAL;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_OFFLOAD])
		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
	else
		/* MACsec offloading is off by default */
		macsec->offload = MACSEC_OFFLOAD_OFF;

	/* Check if the offloading mode is supported by the underlying layers */
	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(macsec->offload, macsec))
		return -EOPNOTSUPP;

	/* send_sci must be true when the transmit SCI is explicitly set */
	if ((data && data[IFLA_MACSEC_SCI]) &&
	    (data && data[IFLA_MACSEC_INC_SCI])) {
		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

		if (!send_sci)
			return -EINVAL;
	}

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
	if (mtu < 0)
		dev->mtu = 0;
	else
		dev->mtu = mtu;

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	netdev_lockdep_set_classes(dev);
	lockdep_set_class(&dev->addr_list_lock,
			  &macsec_netdev_addr_lock_key);

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			err = macsec_offload(ops->mdo_add_secy, &ctx);
			if (err)
				goto del_dev;

			macsec->insert_tx_tag =
				macsec_needs_tx_tag(macsec, ops);
		}
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netdev_update_features(dev);
	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}

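/* End-to-end sketch of the newlink path from userspace, with hypothetical
 * device names:
 *
 *	ip link add link eth0 macsec0 type macsec port 11 encrypt on
 *
 * IFLA_LINK selects eth0; IFLA_MACSEC_PORT makes the SCI
 * <macsec0's MAC, inherited from eth0 by default>:00:0b via dev_to_sci();
 * and IFLA_MACSEC_ENCRYPT is applied by macsec_changelink_common() after
 * macsec_add_dev() has set the defaults. Creating a second SecY with the
 * same SCI on eth0 fails with -EBUSY via sci_exists().
 */
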
static int validate_cipher_suite(const struct nlattr *attr,
				 struct netlink_ext_ack *extack)
{
	switch (nla_get_u64(attr)) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		return 0;
	default:
		return -EINVAL;
	}
}

static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	es  = nla_get_u8_default(data[IFLA_MACSEC_ES], false);
	sci = nla_get_u8_default(data[IFLA_MACSEC_INC_SCI], false);
	scb = nla_get_u8_default(data[IFLA_MACSEC_SCB], false);

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

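/* The TCI bit checks above enforce that at most one of SC (explicit SCI in
 * frames), ES (end station) and SCB (EPON single-copy broadcast) is
 * requested, matching the SecTAG encoding rules of IEEE 802.1AE. A sketch of
 * a rejected combination, with hypothetical device names:
 *
 *	ip link add link eth0 macsec0 type macsec send_sci on end_station on
 *
 * fails with -EINVAL, as does enabling replay protection without supplying
 * a replay window.
 */
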
static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

struct net_device *macsec_get_real_dev(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev;
}
EXPORT_SYMBOL_GPL(macsec_get_real_dev);

bool macsec_netdev_is_offloaded(struct net_device *dev)
{
	return macsec_is_offloaded(macsec_priv(dev));
}
EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded);

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_tx_sc *tx_sc;
	struct macsec_dev *macsec;
	struct macsec_secy *secy;
	u64 csid;

	macsec = macsec_priv(dev);
	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

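/* macsec_get_size() and macsec_fill_info() together back the RTM_GETLINK
 * dump, so the size estimate must stay in sync with what is emitted above.
 * From userspace the result surfaces roughly as (illustrative output,
 * hypothetical device):
 *
 *	$ ip -d link show macsec0
 *	... macsec sci 5254001234560001 protect on cipher GCM-AES-128 ...
 *
 * IFLA_MACSEC_WINDOW is the only conditional attribute: it is emitted only
 * while replay protection is enabled, which is why get_size() still reserves
 * room for it unconditionally.
 */
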
static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	struct macsec_rxh_data *rxd;
	struct macsec_dev *m, *n;
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	rxd = macsec_data_rtnl(real_dev);

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE:
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	case NETDEV_UNREGISTER:
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	case NETDEV_CHANGEMTU:
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
		break;
	case NETDEV_FEAT_CHANGE:
		list_for_each_entry(m, &rxd->secys, secys) {
			macsec_inherit_tso_max(m->secy.netdev);
			netdev_update_features(m->secy.netdev);
		}
		break;
	}

	return NOTIFY_OK;
}

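/* Example of the NETDEV_CHANGEMTU clamp above, assuming the default 32 bytes
 * of per-packet overhead (16-byte SecTAG with SCI + 16-byte ICV) and
 * hypothetical device names: if macsec0 sits on eth0 and
 *
 *	ip link set eth0 mtu 1400
 *
 * is issued, the notifier recomputes the budget as 1400 - 32 = 1368 and
 * shrinks macsec0's MTU only if it currently exceeds that; a macsec0
 * already at 1300 is left untouched.
 */
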
static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	macsec_wq = alloc_workqueue("macsec", WQ_UNBOUND, 0);
	if (!macsec_wq)
		return -ENOMEM;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		goto err_destroy_wq;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto err_notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto err_rtnl;

	return 0;

err_rtnl:
	rtnl_link_unregister(&macsec_link_ops);
err_notifier:
	unregister_netdevice_notifier(&macsec_notifier);
err_destroy_wq:
	/* Precautionary, mirrors macsec_exit() to stay safe if work ever
	 * becomes queueable before this point in the future.
	 */
	rcu_barrier();
	destroy_workqueue(macsec_wq);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
	destroy_workqueue(macsec_wq);
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");