/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

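/* SecTAG layout, as carried right after the destination/source MAC
 * addresses (the MACsec EtherType occupies eth.h_proto below):
 *   TCI/AN (1 octet), SL (1 octet: 6-bit short length + 2 reserved
 *   bits), PN (4 octets), optional SCI (8 octets).
 */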
struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,
		  unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8        unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)			\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;				\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

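/* 96-bit GCM IV: the 64-bit SCI followed by the 32-bit packet number,
 * as assembled by macsec_fill_iv() below.
 */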
struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

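/* The counter sets below mirror the IEEE 802.1AE statistics (hence the
 * CamelCase members); they are reported to userspace via netlink (see
 * copy_tx_sa_stats() and friends below).
 */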
struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @active: currently in use
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	atomic_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active: currently in use
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active: currently in use
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station: set the ES (end station) bit in the SecTAG
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: GRO cells for receive processing
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

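/* Reference counting for SAs and SCs: lookups under RCU take a
 * reference with atomic_inc_not_zero() so a dying object is never
 * revived; the last put frees it after an RCU grace period.
 */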
static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (atomic_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define DEFAULT_SAK_LEN 16
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

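/* SCI = station MAC address (6 octets) followed by a 16-bit port
 * identifier, kept in network byte order as it appears on the wire.
 */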
static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (tx_sc->send_sci ||
	    (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

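/* The SL field is only meaningful for secure data shorter than 48
 * octets; it stays 0 (as memset by macsec_fill_sectag()) otherwise.
 */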
static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

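/* Returns the PN to use for this frame and post-increments next_pn.
 * A return value of 0 means the PN space is exhausted: the SA is
 * deactivated and, if frames must be protected, the SecY goes
 * !operational so nothing can be sent unprotected.
 */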
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

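/* Allocate the aead_request, the IV and the scatterlist as a single
 * kmalloc() block, so one aead_request_free() releases all of it.
 */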
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

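/* Returns the protected skb on synchronous completion, or
 * ERR_PTR(-EINPROGRESS) when the AEAD operation completes
 * asynchronously; in that case macsec_encrypt_done() transmits the
 * frame and drops the references taken here.
 */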
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

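/* Post-decryption bookkeeping: replay check against the window below
 * rx_sa->next_pn, per-SC/SA counter updates, and advancement of
 * next_pn for valid frames (see IEEE 802.1AE-2006 10.6 and figure 10-5).
 */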
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

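/* Strip the SecTAG and the ICV: move the MAC addresses up over the
 * SecTAG and trim the ICV off the tail.
 */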
static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len, ret;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

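/* Returns the skb with macsec_skb_cb(skb)->valid set on success,
 * ERR_PTR(-EINPROGRESS) when the AEAD operation completes
 * asynchronously via macsec_decrypt_done(), and the skb with ->valid
 * still false on -EBADMSG, so that the 10.6 "validateFrames disabled"
 * delivery path still works.
 */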
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

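/* Deliver an untagged frame to every SecY on the real device whose
 * validateFrames is not Strict; strict SecYs only count it as NoTag.
 */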
static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		int ret;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
}

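/* rx_handler installed on the real device: RX_HANDLER_PASS hands the
 * frame to the uncontrolled port (normal stack processing on the real
 * device), RX_HANDLER_CONSUMED means it was delivered to a controlled
 * port or dropped.
 */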
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb) {
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	atomic_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

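/* An SCI must be unique across all SecYs sharing the same real device,
 * since macsec_handle_frame() demultiplexes received frames by SCI.
 */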
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	atomic_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	atomic_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam = {
	.id		= GENL_ID_GENERATE,
	.name		= MACSEC_GENL_NAME,
	.hdrsize	= 0,
	.version	= MACSEC_GENL_VERSION,
	.maxattr	= MACSEC_ATTR_MAX,
	.netnsok	= true,
};

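/* The genetlink ops below are typically driven by iproute2, e.g.
 * (hypothetical device and key values):
 *
 *   ip link add link eth0 macsec0 type macsec
 *   ip macsec add macsec0 tx sa 0 pn 1 on \
 *           key 01 00112233445566778899aabbccddeeff
 */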
static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}


static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG],
			     macsec_genl_sa_policy))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG],
			     macsec_genl_rxsc_policy))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}

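/* An SA must be deactivated (active == false) before it can be
 * deleted; the del_* handlers below return -EBUSY otherwise.
 */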
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}

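/* Updates may change the PN and the active flag, but never the key or
 * key id: those attributes are rejected here.
 */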
1981 static bool validate_upd_sa(struct nlattr **attrs)
1982 {
1983 	if (!attrs[MACSEC_SA_ATTR_AN] ||
1984 	    attrs[MACSEC_SA_ATTR_KEY] ||
1985 	    attrs[MACSEC_SA_ATTR_KEYID])
1986 		return false;
1987 
1988 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
1989 		return false;
1990 
1991 	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
1992 		return false;
1993 
1994 	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
1995 		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
1996 			return false;
1997 	}
1998 
1999 	return true;
2000 }
2001 
2002 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2003 {
2004 	struct nlattr **attrs = info->attrs;
2005 	struct net_device *dev;
2006 	struct macsec_secy *secy;
2007 	struct macsec_tx_sc *tx_sc;
2008 	struct macsec_tx_sa *tx_sa;
2009 	u8 assoc_num;
2010 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2011 
2012 	if (!attrs[MACSEC_ATTR_IFINDEX])
2013 		return -EINVAL;
2014 
2015 	if (parse_sa_config(attrs, tb_sa))
2016 		return -EINVAL;
2017 
2018 	if (!validate_upd_sa(tb_sa))
2019 		return -EINVAL;
2020 
2021 	rtnl_lock();
2022 	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2023 				 &dev, &secy, &tx_sc, &assoc_num);
2024 	if (IS_ERR(tx_sa)) {
2025 		rtnl_unlock();
2026 		return PTR_ERR(tx_sa);
2027 	}
2028 
2029 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2030 		spin_lock_bh(&tx_sa->lock);
2031 		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
2032 		spin_unlock_bh(&tx_sa->lock);
2033 	}
2034 
2035 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2036 		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2037 
2038 	if (assoc_num == tx_sc->encoding_sa)
2039 		secy->operational = tx_sa->active;
2040 
2041 	rtnl_unlock();
2042 
2043 	return 0;
2044 }
2045 
2046 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2047 {
2048 	struct nlattr **attrs = info->attrs;
2049 	struct net_device *dev;
2050 	struct macsec_secy *secy;
2051 	struct macsec_rx_sc *rx_sc;
2052 	struct macsec_rx_sa *rx_sa;
2053 	u8 assoc_num;
2054 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2055 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2056 
2057 	if (!attrs[MACSEC_ATTR_IFINDEX])
2058 		return -EINVAL;
2059 
2060 	if (parse_rxsc_config(attrs, tb_rxsc))
2061 		return -EINVAL;
2062 
2063 	if (parse_sa_config(attrs, tb_sa))
2064 		return -EINVAL;
2065 
2066 	if (!validate_upd_sa(tb_sa))
2067 		return -EINVAL;
2068 
2069 	rtnl_lock();
2070 	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2071 				 &dev, &secy, &rx_sc, &assoc_num);
2072 	if (IS_ERR(rx_sa)) {
2073 		rtnl_unlock();
2074 		return PTR_ERR(rx_sa);
2075 	}
2076 
2077 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2078 		spin_lock_bh(&rx_sa->lock);
2079 		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
2080 		spin_unlock_bh(&rx_sa->lock);
2081 	}
2082 
2083 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2084 		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2085 
2086 	rtnl_unlock();
2087 	return 0;
2088 }
2089 
2090 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2091 {
2092 	struct nlattr **attrs = info->attrs;
2093 	struct net_device *dev;
2094 	struct macsec_secy *secy;
2095 	struct macsec_rx_sc *rx_sc;
2096 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2097 
2098 	if (!attrs[MACSEC_ATTR_IFINDEX])
2099 		return -EINVAL;
2100 
2101 	if (parse_rxsc_config(attrs, tb_rxsc))
2102 		return -EINVAL;
2103 
2104 	if (!validate_add_rxsc(tb_rxsc))
2105 		return -EINVAL;
2106 
2107 	rtnl_lock();
2108 	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2109 	if (IS_ERR(rx_sc)) {
2110 		rtnl_unlock();
2111 		return PTR_ERR(rx_sc);
2112 	}
2113 
2114 	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2115 		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2116 
2117 		if (rx_sc->active != new)
2118 			secy->n_rx_sc += new ? 1 : -1;
2119 
2120 		rx_sc->active = new;
2121 	}
2122 
2123 	rtnl_unlock();
2124 
2125 	return 0;
2126 }
2127 
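/* The copy_*_stats() helpers below fold per-CPU counters into a single
 * struct before emitting them as netlink attributes. The 32-bit SA
 * counters are summed directly; the 64-bit SC/SecY counters are
 * snapshotted under a u64_stats seqcount retry loop so that 32-bit
 * hosts still observe consistent values.
 */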
2128 static int copy_tx_sa_stats(struct sk_buff *skb,
2129 			     struct macsec_tx_sa_stats __percpu *pstats)
2130 {
2131 	struct macsec_tx_sa_stats sum = {0, };
2132 	int cpu;
2133 
2134 	for_each_possible_cpu(cpu) {
2135 		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
2136 
2137 		sum.OutPktsProtected += stats->OutPktsProtected;
2138 		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
2139 	}
2140 
2141 	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
2142 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
2143 		return -EMSGSIZE;
2144 
2145 	return 0;
2146 }
2147 
2148 static int copy_rx_sa_stats(struct sk_buff *skb,
2149 			     struct macsec_rx_sa_stats __percpu *pstats)
2150 {
2151 	struct macsec_rx_sa_stats sum = {0, };
2152 	int cpu;
2153 
2154 	for_each_possible_cpu(cpu) {
2155 		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
2156 
2157 		sum.InPktsOK         += stats->InPktsOK;
2158 		sum.InPktsInvalid    += stats->InPktsInvalid;
2159 		sum.InPktsNotValid   += stats->InPktsNotValid;
2160 		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
2161 		sum.InPktsUnusedSA   += stats->InPktsUnusedSA;
2162 	}
2163 
2164 	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
2165 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
2166 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
2167 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
2168 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
2169 		return -EMSGSIZE;
2170 
2171 	return 0;
2172 }
2173 
2174 static int copy_rx_sc_stats(struct sk_buff *skb,
2175 			     struct pcpu_rx_sc_stats __percpu *pstats)
2176 {
2177 	struct macsec_rx_sc_stats sum = {0, };
2178 	int cpu;
2179 
2180 	for_each_possible_cpu(cpu) {
2181 		const struct pcpu_rx_sc_stats *stats;
2182 		struct macsec_rx_sc_stats tmp;
2183 		unsigned int start;
2184 
2185 		stats = per_cpu_ptr(pstats, cpu);
2186 		do {
2187 			start = u64_stats_fetch_begin_irq(&stats->syncp);
2188 			memcpy(&tmp, &stats->stats, sizeof(tmp));
2189 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2190 
2191 		sum.InOctetsValidated += tmp.InOctetsValidated;
2192 		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
2193 		sum.InPktsUnchecked   += tmp.InPktsUnchecked;
2194 		sum.InPktsDelayed     += tmp.InPktsDelayed;
2195 		sum.InPktsOK          += tmp.InPktsOK;
2196 		sum.InPktsInvalid     += tmp.InPktsInvalid;
2197 		sum.InPktsLate        += tmp.InPktsLate;
2198 		sum.InPktsNotValid    += tmp.InPktsNotValid;
2199 		sum.InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
2200 		sum.InPktsUnusedSA    += tmp.InPktsUnusedSA;
2201 	}
2202 
2203 	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2204 			      sum.InOctetsValidated,
2205 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2206 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2207 			      sum.InOctetsDecrypted,
2208 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2209 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2210 			      sum.InPktsUnchecked,
2211 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2212 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2213 			      sum.InPktsDelayed,
2214 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2215 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2216 			      sum.InPktsOK,
2217 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2218 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2219 			      sum.InPktsInvalid,
2220 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2221 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2222 			      sum.InPktsLate,
2223 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2224 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2225 			      sum.InPktsNotValid,
2226 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2227 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2228 			      sum.InPktsNotUsingSA,
2229 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2230 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2231 			      sum.InPktsUnusedSA,
2232 			      MACSEC_RXSC_STATS_ATTR_PAD))
2233 		return -EMSGSIZE;
2234 
2235 	return 0;
2236 }
2237 
2238 static int copy_tx_sc_stats(struct sk_buff *skb,
2239 			     struct pcpu_tx_sc_stats __percpu *pstats)
2240 {
2241 	struct macsec_tx_sc_stats sum = {0, };
2242 	int cpu;
2243 
2244 	for_each_possible_cpu(cpu) {
2245 		const struct pcpu_tx_sc_stats *stats;
2246 		struct macsec_tx_sc_stats tmp;
2247 		unsigned int start;
2248 
2249 		stats = per_cpu_ptr(pstats, cpu);
2250 		do {
2251 			start = u64_stats_fetch_begin_irq(&stats->syncp);
2252 			memcpy(&tmp, &stats->stats, sizeof(tmp));
2253 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2254 
2255 		sum.OutPktsProtected   += tmp.OutPktsProtected;
2256 		sum.OutPktsEncrypted   += tmp.OutPktsEncrypted;
2257 		sum.OutOctetsProtected += tmp.OutOctetsProtected;
2258 		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2259 	}
2260 
2261 	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2262 			      sum.OutPktsProtected,
2263 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2264 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2265 			      sum.OutPktsEncrypted,
2266 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2267 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2268 			      sum.OutOctetsProtected,
2269 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2270 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2271 			      sum.OutOctetsEncrypted,
2272 			      MACSEC_TXSC_STATS_ATTR_PAD))
2273 		return -EMSGSIZE;
2274 
2275 	return 0;
2276 }
2277 
2278 static int copy_secy_stats(struct sk_buff *skb,
2279 			    struct pcpu_secy_stats __percpu *pstats)
2280 {
2281 	struct macsec_dev_stats sum = {0, };
2282 	int cpu;
2283 
2284 	for_each_possible_cpu(cpu) {
2285 		const struct pcpu_secy_stats *stats;
2286 		struct macsec_dev_stats tmp;
2287 		unsigned int start;
2288 
2289 		stats = per_cpu_ptr(pstats, cpu);
2290 		do {
2291 			start = u64_stats_fetch_begin_irq(&stats->syncp);
2292 			memcpy(&tmp, &stats->stats, sizeof(tmp));
2293 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2294 
2295 		sum.OutPktsUntagged  += tmp.OutPktsUntagged;
2296 		sum.InPktsUntagged   += tmp.InPktsUntagged;
2297 		sum.OutPktsTooLong   += tmp.OutPktsTooLong;
2298 		sum.InPktsNoTag      += tmp.InPktsNoTag;
2299 		sum.InPktsBadTag     += tmp.InPktsBadTag;
2300 		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2301 		sum.InPktsNoSCI      += tmp.InPktsNoSCI;
2302 		sum.InPktsOverrun    += tmp.InPktsOverrun;
2303 	}
2304 
2305 	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2306 			      sum.OutPktsUntagged,
2307 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2308 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
2309 			      sum.InPktsUntagged,
2310 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2311 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
2312 			      sum.OutPktsTooLong,
2313 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2314 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
2315 			      sum.InPktsNoTag,
2316 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2317 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
2318 			      sum.InPktsBadTag,
2319 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2320 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
2321 			      sum.InPktsUnknownSCI,
2322 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2323 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
2324 			      sum.InPktsNoSCI,
2325 			      MACSEC_SECY_STATS_ATTR_PAD) ||
2326 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
2327 			      sum.InPktsOverrun,
2328 			      MACSEC_SECY_STATS_ATTR_PAD))
2329 		return -EMSGSIZE;
2330 
2331 	return 0;
2332 }
2333 
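/* nla_put_secy() emits the SecY configuration as one nested attribute.
 * Sketch of the resulting layout (attribute prefixes omitted):
 *
 *   MACSEC_ATTR_SECY (nest)
 *     SCI, CIPHER_SUITE, ICV_LEN, OPER, PROTECT, REPLAY, VALIDATE,
 *     ENCRYPT, INC_SCI, ES, SCB, ENCODING_SA [, WINDOW]
 */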
2334 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2335 {
2336 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2337 	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
2338 
2339 	if (!secy_nest)
2340 		return 1;
2341 
2342 	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
2343 			MACSEC_SECY_ATTR_PAD) ||
2344 	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
2345 			      MACSEC_DEFAULT_CIPHER_ID,
2346 			      MACSEC_SECY_ATTR_PAD) ||
2347 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
2348 	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
2349 	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
2350 	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
2351 	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
2352 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
2353 	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
2354 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
2355 	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
2356 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
2357 		goto cancel;
2358 
2359 	if (secy->replay_protect) {
2360 		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
2361 			goto cancel;
2362 	}
2363 
2364 	nla_nest_end(skb, secy_nest);
2365 	return 0;
2366 
2367 cancel:
2368 	nla_nest_cancel(skb, secy_nest);
2369 	return 1;
2370 }
2371 
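/* dump_secy() builds one MACSEC_CMD_GET_TXSC message per device.
 * Approximate message layout:
 *
 *   MACSEC_ATTR_IFINDEX
 *   MACSEC_ATTR_SECY (nest, see nla_put_secy() above)
 *   MACSEC_ATTR_TXSC_STATS, MACSEC_ATTR_SECY_STATS (stats nests)
 *   MACSEC_ATTR_TXSA_LIST (one nest per configured TX SA)
 *   MACSEC_ATTR_RXSC_LIST (one nest per RX SC, each with its SA list)
 *
 * Userspace consumes this via e.g. "ip macsec show" (illustrative).
 */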
2372 static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
2373 		     struct sk_buff *skb, struct netlink_callback *cb)
2374 {
2375 	struct macsec_rx_sc *rx_sc;
2376 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2377 	struct nlattr *txsa_list, *rxsc_list;
2378 	int i, j;
2379 	void *hdr;
2380 	struct nlattr *attr;
2381 
2382 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2383 			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
2384 	if (!hdr)
2385 		return -EMSGSIZE;
2386 
2387 	genl_dump_check_consistent(cb, hdr, &macsec_fam);
2388 
2389 	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
2390 		goto nla_put_failure;
2391 
2392 	if (nla_put_secy(secy, skb))
2393 		goto nla_put_failure;
2394 
2395 	attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
2396 	if (!attr)
2397 		goto nla_put_failure;
2398 	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
2399 		nla_nest_cancel(skb, attr);
2400 		goto nla_put_failure;
2401 	}
2402 	nla_nest_end(skb, attr);
2403 
2404 	attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
2405 	if (!attr)
2406 		goto nla_put_failure;
2407 	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
2408 		nla_nest_cancel(skb, attr);
2409 		goto nla_put_failure;
2410 	}
2411 	nla_nest_end(skb, attr);
2412 
2413 	txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
2414 	if (!txsa_list)
2415 		goto nla_put_failure;
2416 	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
2417 		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
2418 		struct nlattr *txsa_nest;
2419 
2420 		if (!tx_sa)
2421 			continue;
2422 
2423 		txsa_nest = nla_nest_start(skb, j++);
2424 		if (!txsa_nest) {
2425 			nla_nest_cancel(skb, txsa_list);
2426 			goto nla_put_failure;
2427 		}
2428 
2429 		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
2430 		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
2431 		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
2432 		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
2433 			nla_nest_cancel(skb, txsa_nest);
2434 			nla_nest_cancel(skb, txsa_list);
2435 			goto nla_put_failure;
2436 		}
2437 
2438 		attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
2439 		if (!attr) {
2440 			nla_nest_cancel(skb, txsa_nest);
2441 			nla_nest_cancel(skb, txsa_list);
2442 			goto nla_put_failure;
2443 		}
2444 		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
2445 			nla_nest_cancel(skb, attr);
2446 			nla_nest_cancel(skb, txsa_nest);
2447 			nla_nest_cancel(skb, txsa_list);
2448 			goto nla_put_failure;
2449 		}
2450 		nla_nest_end(skb, attr);
2451 
2452 		nla_nest_end(skb, txsa_nest);
2453 	}
2454 	nla_nest_end(skb, txsa_list);
2455 
2456 	rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
2457 	if (!rxsc_list)
2458 		goto nla_put_failure;
2459 
2460 	j = 1;
2461 	for_each_rxsc_rtnl(secy, rx_sc) {
2462 		int k;
2463 		struct nlattr *rxsa_list;
2464 		struct nlattr *rxsc_nest = nla_nest_start(skb, j++);
2465 
2466 		if (!rxsc_nest) {
2467 			nla_nest_cancel(skb, rxsc_list);
2468 			goto nla_put_failure;
2469 		}
2470 
2471 		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
2472 		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
2473 				MACSEC_RXSC_ATTR_PAD)) {
2474 			nla_nest_cancel(skb, rxsc_nest);
2475 			nla_nest_cancel(skb, rxsc_list);
2476 			goto nla_put_failure;
2477 		}
2478 
2479 		attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
2480 		if (!attr) {
2481 			nla_nest_cancel(skb, rxsc_nest);
2482 			nla_nest_cancel(skb, rxsc_list);
2483 			goto nla_put_failure;
2484 		}
2485 		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
2486 			nla_nest_cancel(skb, attr);
2487 			nla_nest_cancel(skb, rxsc_nest);
2488 			nla_nest_cancel(skb, rxsc_list);
2489 			goto nla_put_failure;
2490 		}
2491 		nla_nest_end(skb, attr);
2492 
2493 		rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
2494 		if (!rxsa_list) {
2495 			nla_nest_cancel(skb, rxsc_nest);
2496 			nla_nest_cancel(skb, rxsc_list);
2497 			goto nla_put_failure;
2498 		}
2499 
2500 		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
2501 			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
2502 			struct nlattr *rxsa_nest;
2503 
2504 			if (!rx_sa)
2505 				continue;
2506 
2507 			rxsa_nest = nla_nest_start(skb, k++);
2508 			if (!rxsa_nest) {
2509 				nla_nest_cancel(skb, rxsa_list);
2510 				nla_nest_cancel(skb, rxsc_nest);
2511 				nla_nest_cancel(skb, rxsc_list);
2512 				goto nla_put_failure;
2513 			}
2514 
2515 			attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
2516 			if (!attr) {
2517 				nla_nest_cancel(skb, rxsa_list);
2518 				nla_nest_cancel(skb, rxsc_nest);
2519 				nla_nest_cancel(skb, rxsc_list);
2520 				goto nla_put_failure;
2521 			}
2522 			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
2523 				nla_nest_cancel(skb, attr);
2524 				nla_nest_cancel(skb, rxsa_list);
2525 				nla_nest_cancel(skb, rxsc_nest);
2526 				nla_nest_cancel(skb, rxsc_list);
2527 				goto nla_put_failure;
2528 			}
2529 			nla_nest_end(skb, attr);
2530 
2531 			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
2532 			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
2533 			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
2534 			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
2535 				nla_nest_cancel(skb, rxsa_nest);
2536 				nla_nest_cancel(skb, rxsc_nest);
2537 				nla_nest_cancel(skb, rxsc_list);
2538 				goto nla_put_failure;
2539 			}
2540 			nla_nest_end(skb, rxsa_nest);
2541 		}
2542 
2543 		nla_nest_end(skb, rxsa_list);
2544 		nla_nest_end(skb, rxsc_nest);
2545 	}
2546 
2547 	nla_nest_end(skb, rxsc_list);
2548 
2549 	genlmsg_end(skb, hdr);
2550 
2551 	return 0;
2552 
2553 nla_put_failure:
2554 	genlmsg_cancel(skb, hdr);
2555 	return -EMSGSIZE;
2556 }
2557 
2558 static int macsec_generation = 1; /* protected by RTNL */
2559 
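/* macsec_generation (above) is bumped on every link creation and
 * removal; dumps record it in cb->seq so genl_dump_check_consistent()
 * can mark the dump interrupted (NLM_F_DUMP_INTR) if the device list
 * changed while userspace was iterating.
 */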
2560 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
2561 {
2562 	struct net *net = sock_net(skb->sk);
2563 	struct net_device *dev;
2564 	int dev_idx, d;
2565 
2566 	dev_idx = cb->args[0];
2567 
2568 	d = 0;
2569 	rtnl_lock();
2570 
2571 	cb->seq = macsec_generation;
2572 
2573 	for_each_netdev(net, dev) {
2574 		struct macsec_secy *secy;
2575 
2576 		if (d < dev_idx)
2577 			goto next;
2578 
2579 		if (!netif_is_macsec(dev))
2580 			goto next;
2581 
2582 		secy = &macsec_priv(dev)->secy;
2583 		if (dump_secy(secy, dev, skb, cb) < 0)
2584 			goto done;
2585 next:
2586 		d++;
2587 	}
2588 
2589 done:
2590 	rtnl_unlock();
2591 	cb->args[0] = d;
2592 	return skb->len;
2593 }
2594 
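/* All mutating commands carry GENL_ADMIN_PERM and thus require
 * CAP_NET_ADMIN; the MACSEC_CMD_GET_TXSC dump is the only
 * unprivileged operation.
 */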
2595 static const struct genl_ops macsec_genl_ops[] = {
2596 	{
2597 		.cmd = MACSEC_CMD_GET_TXSC,
2598 		.dumpit = macsec_dump_txsc,
2599 		.policy = macsec_genl_policy,
2600 	},
2601 	{
2602 		.cmd = MACSEC_CMD_ADD_RXSC,
2603 		.doit = macsec_add_rxsc,
2604 		.policy = macsec_genl_policy,
2605 		.flags = GENL_ADMIN_PERM,
2606 	},
2607 	{
2608 		.cmd = MACSEC_CMD_DEL_RXSC,
2609 		.doit = macsec_del_rxsc,
2610 		.policy = macsec_genl_policy,
2611 		.flags = GENL_ADMIN_PERM,
2612 	},
2613 	{
2614 		.cmd = MACSEC_CMD_UPD_RXSC,
2615 		.doit = macsec_upd_rxsc,
2616 		.policy = macsec_genl_policy,
2617 		.flags = GENL_ADMIN_PERM,
2618 	},
2619 	{
2620 		.cmd = MACSEC_CMD_ADD_TXSA,
2621 		.doit = macsec_add_txsa,
2622 		.policy = macsec_genl_policy,
2623 		.flags = GENL_ADMIN_PERM,
2624 	},
2625 	{
2626 		.cmd = MACSEC_CMD_DEL_TXSA,
2627 		.doit = macsec_del_txsa,
2628 		.policy = macsec_genl_policy,
2629 		.flags = GENL_ADMIN_PERM,
2630 	},
2631 	{
2632 		.cmd = MACSEC_CMD_UPD_TXSA,
2633 		.doit = macsec_upd_txsa,
2634 		.policy = macsec_genl_policy,
2635 		.flags = GENL_ADMIN_PERM,
2636 	},
2637 	{
2638 		.cmd = MACSEC_CMD_ADD_RXSA,
2639 		.doit = macsec_add_rxsa,
2640 		.policy = macsec_genl_policy,
2641 		.flags = GENL_ADMIN_PERM,
2642 	},
2643 	{
2644 		.cmd = MACSEC_CMD_DEL_RXSA,
2645 		.doit = macsec_del_rxsa,
2646 		.policy = macsec_genl_policy,
2647 		.flags = GENL_ADMIN_PERM,
2648 	},
2649 	{
2650 		.cmd = MACSEC_CMD_UPD_RXSA,
2651 		.doit = macsec_upd_rxsa,
2652 		.policy = macsec_genl_policy,
2653 		.flags = GENL_ADMIN_PERM,
2654 	},
2655 };
2656 
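/* Transmit path: frames go out untagged when protection is disabled,
 * are dropped when the SecY is not operational, and are otherwise
 * handed to macsec_encrypt(). An -EINPROGRESS return means the AEAD
 * request completes asynchronously and its completion callback
 * transmits the frame, so it is deliberately not counted as a drop.
 */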
2657 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
2658 				     struct net_device *dev)
2659 {
2660 	struct macsec_dev *macsec = netdev_priv(dev);
2661 	struct macsec_secy *secy = &macsec->secy;
2662 	struct pcpu_secy_stats *secy_stats;
2663 	int ret, len;
2664 
2665 	/* 10.5: IEEE 802.1AE-2006 secure frame generation */
2666 	if (!secy->protect_frames) {
2667 		secy_stats = this_cpu_ptr(macsec->stats);
2668 		u64_stats_update_begin(&secy_stats->syncp);
2669 		secy_stats->stats.OutPktsUntagged++;
2670 		u64_stats_update_end(&secy_stats->syncp);
2671 		skb->dev = macsec->real_dev;
2672 		len = skb->len;
2673 		ret = dev_queue_xmit(skb);
2674 		count_tx(dev, ret, len);
2675 		return ret;
2676 	}
2677 
2678 	if (!secy->operational) {
2679 		kfree_skb(skb);
2680 		dev->stats.tx_dropped++;
2681 		return NETDEV_TX_OK;
2682 	}
2683 
2684 	skb = macsec_encrypt(skb, dev);
2685 	if (IS_ERR(skb)) {
2686 		if (PTR_ERR(skb) != -EINPROGRESS)
2687 			dev->stats.tx_dropped++;
2688 		return NETDEV_TX_OK;
2689 	}
2690 
2691 	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
2692 
2693 	macsec_encrypt_finish(skb, dev);
2694 	len = skb->len;
2695 	ret = dev_queue_xmit(skb);
2696 	count_tx(dev, ret, len);
2697 	return ret;
2698 }
2699 
2700 #define MACSEC_FEATURES \
2701 	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
2702 static int macsec_dev_init(struct net_device *dev)
2703 {
2704 	struct macsec_dev *macsec = macsec_priv(dev);
2705 	struct net_device *real_dev = macsec->real_dev;
2706 	int err;
2707 
2708 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2709 	if (!dev->tstats)
2710 		return -ENOMEM;
2711 
2712 	err = gro_cells_init(&macsec->gro_cells, dev);
2713 	if (err) {
2714 		free_percpu(dev->tstats);
2715 		return err;
2716 	}
2717 
2718 	dev->features = real_dev->features & MACSEC_FEATURES;
2719 	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
2720 
2721 	dev->needed_headroom = real_dev->needed_headroom +
2722 			       MACSEC_NEEDED_HEADROOM;
2723 	dev->needed_tailroom = real_dev->needed_tailroom +
2724 			       MACSEC_NEEDED_TAILROOM;
2725 
2726 	if (is_zero_ether_addr(dev->dev_addr))
2727 		eth_hw_addr_inherit(dev, real_dev);
2728 	if (is_zero_ether_addr(dev->broadcast))
2729 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
2730 
2731 	return 0;
2732 }
2733 
2734 static void macsec_dev_uninit(struct net_device *dev)
2735 {
2736 	struct macsec_dev *macsec = macsec_priv(dev);
2737 
2738 	gro_cells_destroy(&macsec->gro_cells);
2739 	free_percpu(dev->tstats);
2740 }
2741 
2742 static netdev_features_t macsec_fix_features(struct net_device *dev,
2743 					     netdev_features_t features)
2744 {
2745 	struct macsec_dev *macsec = macsec_priv(dev);
2746 	struct net_device *real_dev = macsec->real_dev;
2747 
2748 	features &= (real_dev->features & MACSEC_FEATURES) |
2749 		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
2750 	features |= NETIF_F_LLTX;
2751 
2752 	return features;
2753 }
2754 
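/* Open/stop propagate the virtual device's filtering state to the
 * underlying device: the macsec device's unicast address joins the
 * lower device's UC list, and allmulti/promisc refcounts are adjusted.
 * The error path in macsec_dev_open() unwinds in reverse order.
 */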
2755 static int macsec_dev_open(struct net_device *dev)
2756 {
2757 	struct macsec_dev *macsec = macsec_priv(dev);
2758 	struct net_device *real_dev = macsec->real_dev;
2759 	int err;
2760 
2761 	if (!(real_dev->flags & IFF_UP))
2762 		return -ENETDOWN;
2763 
2764 	err = dev_uc_add(real_dev, dev->dev_addr);
2765 	if (err < 0)
2766 		return err;
2767 
2768 	if (dev->flags & IFF_ALLMULTI) {
2769 		err = dev_set_allmulti(real_dev, 1);
2770 		if (err < 0)
2771 			goto del_unicast;
2772 	}
2773 
2774 	if (dev->flags & IFF_PROMISC) {
2775 		err = dev_set_promiscuity(real_dev, 1);
2776 		if (err < 0)
2777 			goto clear_allmulti;
2778 	}
2779 
2780 	if (netif_carrier_ok(real_dev))
2781 		netif_carrier_on(dev);
2782 
2783 	return 0;
2784 clear_allmulti:
2785 	if (dev->flags & IFF_ALLMULTI)
2786 		dev_set_allmulti(real_dev, -1);
2787 del_unicast:
2788 	dev_uc_del(real_dev, dev->dev_addr);
2789 	netif_carrier_off(dev);
2790 	return err;
2791 }
2792 
2793 static int macsec_dev_stop(struct net_device *dev)
2794 {
2795 	struct macsec_dev *macsec = macsec_priv(dev);
2796 	struct net_device *real_dev = macsec->real_dev;
2797 
2798 	netif_carrier_off(dev);
2799 
2800 	dev_mc_unsync(real_dev, dev);
2801 	dev_uc_unsync(real_dev, dev);
2802 
2803 	if (dev->flags & IFF_ALLMULTI)
2804 		dev_set_allmulti(real_dev, -1);
2805 
2806 	if (dev->flags & IFF_PROMISC)
2807 		dev_set_promiscuity(real_dev, -1);
2808 
2809 	dev_uc_del(real_dev, dev->dev_addr);
2810 
2811 	return 0;
2812 }
2813 
2814 static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
2815 {
2816 	struct net_device *real_dev = macsec_priv(dev)->real_dev;
2817 
2818 	if (!(dev->flags & IFF_UP))
2819 		return;
2820 
2821 	if (change & IFF_ALLMULTI)
2822 		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
2823 
2824 	if (change & IFF_PROMISC)
2825 		dev_set_promiscuity(real_dev,
2826 				    dev->flags & IFF_PROMISC ? 1 : -1);
2827 }
2828 
2829 static void macsec_dev_set_rx_mode(struct net_device *dev)
2830 {
2831 	struct net_device *real_dev = macsec_priv(dev)->real_dev;
2832 
2833 	dev_mc_sync(real_dev, dev);
2834 	dev_uc_sync(real_dev, dev);
2835 }
2836 
2837 static int macsec_set_mac_address(struct net_device *dev, void *p)
2838 {
2839 	struct macsec_dev *macsec = macsec_priv(dev);
2840 	struct net_device *real_dev = macsec->real_dev;
2841 	struct sockaddr *addr = p;
2842 	int err;
2843 
2844 	if (!is_valid_ether_addr(addr->sa_data))
2845 		return -EADDRNOTAVAIL;
2846 
2847 	if (!(dev->flags & IFF_UP))
2848 		goto out;
2849 
2850 	err = dev_uc_add(real_dev, addr->sa_data);
2851 	if (err < 0)
2852 		return err;
2853 
2854 	dev_uc_del(real_dev, dev->dev_addr);
2855 
2856 out:
2857 	ether_addr_copy(dev->dev_addr, addr->sa_data);
2858 	return 0;
2859 }
2860 
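/* MTU accounting, worked example with the defaults: a SecTAG with SCI
 * is 6 + 8 bytes plus the 2-byte MACsec ethertype, i.e.
 * macsec_extra_len(true) == 16, and the default ICV adds 16 more, so a
 * 1500-byte lower MTU leaves 1500 - 16 - 16 = 1468 for the macsec
 * device.
 */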
2861 static int macsec_change_mtu(struct net_device *dev, int new_mtu)
2862 {
2863 	struct macsec_dev *macsec = macsec_priv(dev);
2864 	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
2865 
2866 	if (macsec->real_dev->mtu - extra < new_mtu)
2867 		return -ERANGE;
2868 
2869 	dev->mtu = new_mtu;
2870 
2871 	return 0;
2872 }
2873 
2874 static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
2875 						    struct rtnl_link_stats64 *s)
2876 {
2877 	int cpu;
2878 
2879 	if (!dev->tstats)
2880 		return s;
2881 
2882 	for_each_possible_cpu(cpu) {
2883 		struct pcpu_sw_netstats *stats;
2884 		struct pcpu_sw_netstats tmp;
2885 		int start;
2886 		unsigned int start;
2887 		stats = per_cpu_ptr(dev->tstats, cpu);
2888 		do {
2889 			start = u64_stats_fetch_begin_irq(&stats->syncp);
2890 			tmp.rx_packets = stats->rx_packets;
2891 			tmp.rx_bytes   = stats->rx_bytes;
2892 			tmp.tx_packets = stats->tx_packets;
2893 			tmp.tx_bytes   = stats->tx_bytes;
2894 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2895 
2896 		s->rx_packets += tmp.rx_packets;
2897 		s->rx_bytes   += tmp.rx_bytes;
2898 		s->tx_packets += tmp.tx_packets;
2899 		s->tx_bytes   += tmp.tx_bytes;
2900 	}
2901 
2902 	s->rx_dropped = dev->stats.rx_dropped;
2903 	s->tx_dropped = dev->stats.tx_dropped;
2904 
2905 	return s;
2906 }
2907 
2908 static int macsec_get_iflink(const struct net_device *dev)
2909 {
2910 	return macsec_priv(dev)->real_dev->ifindex;
2911 }
2912 
2913 static const struct net_device_ops macsec_netdev_ops = {
2914 	.ndo_init		= macsec_dev_init,
2915 	.ndo_uninit		= macsec_dev_uninit,
2916 	.ndo_open		= macsec_dev_open,
2917 	.ndo_stop		= macsec_dev_stop,
2918 	.ndo_fix_features	= macsec_fix_features,
2919 	.ndo_change_mtu		= macsec_change_mtu,
2920 	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
2921 	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
2922 	.ndo_set_mac_address	= macsec_set_mac_address,
2923 	.ndo_start_xmit		= macsec_start_xmit,
2924 	.ndo_get_stats64	= macsec_get_stats64,
2925 	.ndo_get_iflink		= macsec_get_iflink,
2926 };
2927 
2928 static const struct device_type macsec_type = {
2929 	.name = "macsec",
2930 };
2931 
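/* rtnetlink attribute policy for "ip link ... type macsec".
 * Illustrative creation command (iproute2 syntax, names are examples):
 *
 *   ip link add link eth0 macsec0 type macsec port 11 encrypt on
 */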
2932 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
2933 	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
2934 	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
2935 	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
2936 	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
2937 	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
2938 	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
2939 	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
2940 	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
2941 	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
2942 	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
2943 	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
2944 	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
2945 };
2946 
2947 static void macsec_free_netdev(struct net_device *dev)
2948 {
2949 	struct macsec_dev *macsec = macsec_priv(dev);
2950 	struct net_device *real_dev = macsec->real_dev;
2951 
2952 	free_percpu(macsec->stats);
2953 	free_percpu(macsec->secy.tx_sc.stats);
2954 
2955 	dev_put(real_dev);
2956 	free_netdev(dev);
2957 }
2958 
2959 static void macsec_setup(struct net_device *dev)
2960 {
2961 	ether_setup(dev);
2962 	dev->priv_flags |= IFF_NO_QUEUE;
2963 	dev->netdev_ops = &macsec_netdev_ops;
2964 	dev->destructor = macsec_free_netdev;
2965 
2966 	eth_zero_addr(dev->broadcast);
2967 }
2968 
2969 static void macsec_changelink_common(struct net_device *dev,
2970 				     struct nlattr *data[])
2971 {
2972 	struct macsec_secy *secy;
2973 	struct macsec_tx_sc *tx_sc;
2974 
2975 	secy = &macsec_priv(dev)->secy;
2976 	tx_sc = &secy->tx_sc;
2977 
2978 	if (data[IFLA_MACSEC_ENCODING_SA]) {
2979 		struct macsec_tx_sa *tx_sa;
2980 
2981 		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
2982 		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
2983 
2984 		secy->operational = tx_sa && tx_sa->active;
2985 	}
2986 
2987 	if (data[IFLA_MACSEC_WINDOW])
2988 		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
2989 
2990 	if (data[IFLA_MACSEC_ENCRYPT])
2991 		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
2992 
2993 	if (data[IFLA_MACSEC_PROTECT])
2994 		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
2995 
2996 	if (data[IFLA_MACSEC_INC_SCI])
2997 		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
2998 
2999 	if (data[IFLA_MACSEC_ES])
3000 		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3001 
3002 	if (data[IFLA_MACSEC_SCB])
3003 		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3004 
3005 	if (data[IFLA_MACSEC_REPLAY_PROTECT])
3006 		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3007 
3008 	if (data[IFLA_MACSEC_VALIDATION])
3009 		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
3010 }
3011 
3012 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
3013 			     struct nlattr *data[])
3014 {
3015 	if (!data)
3016 		return 0;
3017 
3018 	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3019 	    data[IFLA_MACSEC_ICV_LEN] ||
3020 	    data[IFLA_MACSEC_SCI] ||
3021 	    data[IFLA_MACSEC_PORT])
3022 		return -EINVAL;
3023 
3024 	macsec_changelink_common(dev, data);
3025 
3026 	return 0;
3027 }
3028 
3029 static void macsec_del_dev(struct macsec_dev *macsec)
3030 {
3031 	int i;
3032 
3033 	while (macsec->secy.rx_sc) {
3034 		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
3035 
3036 		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
3037 		free_rx_sc(rx_sc);
3038 	}
3039 
3040 	for (i = 0; i < MACSEC_NUM_AN; i++) {
3041 		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
3042 
3043 		if (sa) {
3044 			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
3045 			clear_tx_sa(sa);
3046 		}
3047 	}
3048 }
3049 
3050 static void macsec_dellink(struct net_device *dev, struct list_head *head)
3051 {
3052 	struct macsec_dev *macsec = macsec_priv(dev);
3053 	struct net_device *real_dev = macsec->real_dev;
3054 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3055 
3056 	macsec_generation++;
3057 
3058 	unregister_netdevice_queue(dev, head);
3059 	list_del_rcu(&macsec->secys);
3060 	if (list_empty(&rxd->secys)) {
3061 		netdev_rx_handler_unregister(real_dev);
3062 		kfree(rxd);
3063 	}
3064 
3065 	macsec_del_dev(macsec);
3066 }
3067 
3068 static int register_macsec_dev(struct net_device *real_dev,
3069 			       struct net_device *dev)
3070 {
3071 	struct macsec_dev *macsec = macsec_priv(dev);
3072 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3073 
3074 	if (!rxd) {
3075 		int err;
3076 
3077 		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
3078 		if (!rxd)
3079 			return -ENOMEM;
3080 
3081 		INIT_LIST_HEAD(&rxd->secys);
3082 
3083 		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
3084 						 rxd);
3085 		if (err < 0) {
3086 			kfree(rxd);
3087 			return err;
3088 		}
3089 	}
3090 
3091 	list_add_tail_rcu(&macsec->secys, &rxd->secys);
3092 	return 0;
3093 }
3094 
3095 static bool sci_exists(struct net_device *dev, sci_t sci)
3096 {
3097 	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
3098 	struct macsec_dev *macsec;
3099 
3100 	list_for_each_entry(macsec, &rxd->secys, secys) {
3101 		if (macsec->secy.sci == sci)
3102 			return true;
3103 	}
3104 
3105 	return false;
3106 }
3107 
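/* An SCI is the 48-bit station MAC address concatenated with a 16-bit
 * port number. Illustrative example: MAC 52:54:00:12:34:56 with the
 * default end-station port (1) yields SCI 5254001234560001.
 */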
3108 static sci_t dev_to_sci(struct net_device *dev, __be16 port)
3109 {
3110 	return make_sci(dev->dev_addr, port);
3111 }
3112 
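/* macsec_add_dev() allocates the per-CPU stats and seeds the SecY with
 * this driver's defaults: the default (GCM-AES-128) key length, frame
 * protection on, replay protection off, and an active transmit SC.
 */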
3113 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
3114 {
3115 	struct macsec_dev *macsec = macsec_priv(dev);
3116 	struct macsec_secy *secy = &macsec->secy;
3117 
3118 	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
3119 	if (!macsec->stats)
3120 		return -ENOMEM;
3121 
3122 	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
3123 	if (!secy->tx_sc.stats) {
3124 		free_percpu(macsec->stats);
3125 		return -ENOMEM;
3126 	}
3127 
3128 	if (sci == MACSEC_UNDEF_SCI)
3129 		sci = dev_to_sci(dev, MACSEC_PORT_ES);
3130 
3131 	secy->netdev = dev;
3132 	secy->operational = true;
3133 	secy->key_len = DEFAULT_SAK_LEN;
3134 	secy->icv_len = icv_len;
3135 	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
3136 	secy->protect_frames = true;
3137 	secy->replay_protect = false;
3138 
3139 	secy->sci = sci;
3140 	secy->tx_sc.active = true;
3141 	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
3142 	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
3143 	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
3144 	secy->tx_sc.end_station = false;
3145 	secy->tx_sc.scb = false;
3146 
3147 	return 0;
3148 }
3149 
3150 static int macsec_newlink(struct net *net, struct net_device *dev,
3151 			  struct nlattr *tb[], struct nlattr *data[])
3152 {
3153 	struct macsec_dev *macsec = macsec_priv(dev);
3154 	struct net_device *real_dev;
3155 	int err;
3156 	sci_t sci;
3157 	u8 icv_len = DEFAULT_ICV_LEN;
3158 	rx_handler_func_t *rx_handler;
3159 
3160 	if (!tb[IFLA_LINK])
3161 		return -EINVAL;
3162 	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
3163 	if (!real_dev)
3164 		return -ENODEV;
3165 
3166 	dev->priv_flags |= IFF_MACSEC;
3167 
3168 	macsec->real_dev = real_dev;
3169 
3170 	if (data && data[IFLA_MACSEC_ICV_LEN])
3171 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
3172 	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
3173 
3174 	rx_handler = rtnl_dereference(real_dev->rx_handler);
3175 	if (rx_handler && rx_handler != macsec_handle_frame)
3176 		return -EBUSY;
3177 
3178 	err = register_netdevice(dev);
3179 	if (err < 0)
3180 		return err;
3181 
3182 	dev_hold(real_dev);
3183 
3184 	/* the device needs to be registered already so that ->init has
3185 	 * run and the MAC addr is set
3186 	 */
3187 	if (data && data[IFLA_MACSEC_SCI])
3188 		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
3189 	else if (data && data[IFLA_MACSEC_PORT])
3190 		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
3191 	else
3192 		sci = dev_to_sci(dev, MACSEC_PORT_ES);
3193 
3194 	if (rx_handler && sci_exists(real_dev, sci)) {
3195 		err = -EBUSY;
3196 		goto unregister;
3197 	}
3198 
3199 	err = macsec_add_dev(dev, sci, icv_len);
3200 	if (err)
3201 		goto unregister;
3202 
3203 	if (data)
3204 		macsec_changelink_common(dev, data);
3205 
3206 	err = register_macsec_dev(real_dev, dev);
3207 	if (err < 0)
3208 		goto del_dev;
3209 
3210 	macsec_generation++;
3211 
3212 	return 0;
3213 
3214 del_dev:
3215 	macsec_del_dev(macsec);
3216 unregister:
3217 	unregister_netdevice(dev);
3218 	return err;
3219 }
3220 
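/* Link-time validation: only the default cipher suite (GCM-AES-128)
 * is accepted and the ICV length must fit it; the SecTAG TCI encoding
 * rules forbid combining an explicit SCI with the ES or SCB
 * shorthands; and enabling replay protection requires a window.
 */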
3221 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
3222 {
3223 	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
3224 	u8 icv_len = DEFAULT_ICV_LEN;
3225 	int flag;
3226 	bool es, scb, sci;
3227 
3228 	if (!data)
3229 		return 0;
3230 
3231 	if (data[IFLA_MACSEC_CIPHER_SUITE])
3232 		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
3233 
3234 	if (data[IFLA_MACSEC_ICV_LEN]) {
3235 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
3236 		if (icv_len != DEFAULT_ICV_LEN) {
3237 			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
3238 			struct crypto_aead *dummy_tfm;
3239 
3240 			dummy_tfm = macsec_alloc_tfm(dummy_key,
3241 						     DEFAULT_SAK_LEN,
3242 						     icv_len);
3243 			if (IS_ERR(dummy_tfm))
3244 				return PTR_ERR(dummy_tfm);
3245 			crypto_free_aead(dummy_tfm);
3246 		}
3247 	}
3248 
3249 	switch (csid) {
3250 	case MACSEC_DEFAULT_CIPHER_ID:
3251 	case MACSEC_DEFAULT_CIPHER_ALT:
3252 		if (icv_len < MACSEC_MIN_ICV_LEN ||
3253 		    icv_len > MACSEC_STD_ICV_LEN)
3254 			return -EINVAL;
3255 		break;
3256 	default:
3257 		return -EINVAL;
3258 	}
3259 
3260 	if (data[IFLA_MACSEC_ENCODING_SA]) {
3261 		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
3262 			return -EINVAL;
3263 	}
3264 
3265 	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
3266 	     flag < IFLA_MACSEC_VALIDATION;
3267 	     flag++) {
3268 		if (data[flag]) {
3269 			if (nla_get_u8(data[flag]) > 1)
3270 				return -EINVAL;
3271 		}
3272 	}
3273 
3274 	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
3275 	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
3276 	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
3277 
3278 	if ((sci && (scb || es)) || (scb && es))
3279 		return -EINVAL;
3280 
3281 	if (data[IFLA_MACSEC_VALIDATION] &&
3282 	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
3283 		return -EINVAL;
3284 
3285 	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
3286 	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
3287 	    !data[IFLA_MACSEC_WINDOW])
3288 		return -EINVAL;
3289 
3290 	return 0;
3291 }
3292 
3293 static struct net *macsec_get_link_net(const struct net_device *dev)
3294 {
3295 	return dev_net(macsec_priv(dev)->real_dev);
3296 }
3297 
3298 static size_t macsec_get_size(const struct net_device *dev)
3299 {
3300 	return 0 +
3301 		nla_total_size_64bit(8) + /* SCI */
3302 		nla_total_size(1) + /* ICV_LEN */
3303 		nla_total_size_64bit(8) + /* CIPHER_SUITE */
3304 		nla_total_size(4) + /* WINDOW */
3305 		nla_total_size(1) + /* ENCODING_SA */
3306 		nla_total_size(1) + /* ENCRYPT */
3307 		nla_total_size(1) + /* PROTECT */
3308 		nla_total_size(1) + /* INC_SCI */
3309 		nla_total_size(1) + /* ES */
3310 		nla_total_size(1) + /* SCB */
3311 		nla_total_size(1) + /* REPLAY_PROTECT */
3312 		nla_total_size(1) + /* VALIDATION */
3313 		0;
3314 }
3315 
3316 static int macsec_fill_info(struct sk_buff *skb,
3317 			    const struct net_device *dev)
3318 {
3319 	struct macsec_secy *secy = &macsec_priv(dev)->secy;
3320 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3321 
3322 	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
3323 			IFLA_MACSEC_PAD) ||
3324 	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
3325 	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
3326 			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
3327 	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
3328 	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
3329 	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
3330 	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
3331 	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
3332 	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
3333 	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
3334 	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
3335 	    0)
3336 		goto nla_put_failure;
3337 
3338 	if (secy->replay_protect) {
3339 		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
3340 			goto nla_put_failure;
3341 	}
3342 
3343 	return 0;
3344 
3345 nla_put_failure:
3346 	return -EMSGSIZE;
3347 }
3348 
3349 static struct rtnl_link_ops macsec_link_ops __read_mostly = {
3350 	.kind		= "macsec",
3351 	.priv_size	= sizeof(struct macsec_dev),
3352 	.maxtype	= IFLA_MACSEC_MAX,
3353 	.policy		= macsec_rtnl_policy,
3354 	.setup		= macsec_setup,
3355 	.validate	= macsec_validate_attr,
3356 	.newlink	= macsec_newlink,
3357 	.changelink	= macsec_changelink,
3358 	.dellink	= macsec_dellink,
3359 	.get_size	= macsec_get_size,
3360 	.fill_info	= macsec_fill_info,
3361 	.get_link_net	= macsec_get_link_net,
3362 };
3363 
3364 static bool is_macsec_master(struct net_device *dev)
3365 {
3366 	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
3367 }
3368 
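/* Notifier on the underlying device: NETDEV_UNREGISTER tears down all
 * macsec devices stacked on it in one batch, and NETDEV_CHANGEMTU
 * clamps each stacked device's MTU so room for the SecTAG and ICV is
 * preserved.
 */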
3369 static int macsec_notify(struct notifier_block *this, unsigned long event,
3370 			 void *ptr)
3371 {
3372 	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
3373 	LIST_HEAD(head);
3374 
3375 	if (!is_macsec_master(real_dev))
3376 		return NOTIFY_DONE;
3377 
3378 	switch (event) {
3379 	case NETDEV_UNREGISTER: {
3380 		struct macsec_dev *m, *n;
3381 		struct macsec_rxh_data *rxd;
3382 
3383 		rxd = macsec_data_rtnl(real_dev);
3384 		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
3385 			macsec_dellink(m->secy.netdev, &head);
3386 		}
3387 		unregister_netdevice_many(&head);
3388 		break;
3389 	}
3390 	case NETDEV_CHANGEMTU: {
3391 		struct macsec_dev *m;
3392 		struct macsec_rxh_data *rxd;
3393 
3394 		rxd = macsec_data_rtnl(real_dev);
3395 		list_for_each_entry(m, &rxd->secys, secys) {
3396 			struct net_device *dev = m->secy.netdev;
3397 			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
3398 							    macsec_extra_len(true));
3399 
3400 			if (dev->mtu > mtu)
3401 				dev_set_mtu(dev, mtu);
3402 		}
3403 	}
3404 	}
3405 
3406 	return NOTIFY_OK;
3407 }
3408 
3409 static struct notifier_block macsec_notifier = {
3410 	.notifier_call = macsec_notify,
3411 };
3412 
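/* Module init registers, in order: the netdevice notifier, the
 * rtnl_link ops (link create/destroy) and the generic netlink family
 * (SA/SC configuration); macsec_exit() tears down in reverse and adds
 * an rcu_barrier() so pending RCU callbacks finish before unload.
 */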
3413 static int __init macsec_init(void)
3414 {
3415 	int err;
3416 
3417 	pr_info("MACsec IEEE 802.1AE\n");
3418 	err = register_netdevice_notifier(&macsec_notifier);
3419 	if (err)
3420 		return err;
3421 
3422 	err = rtnl_link_register(&macsec_link_ops);
3423 	if (err)
3424 		goto notifier;
3425 
3426 	err = genl_register_family_with_ops(&macsec_fam, macsec_genl_ops);
3427 	if (err)
3428 		goto rtnl;
3429 
3430 	return 0;
3431 
3432 rtnl:
3433 	rtnl_link_unregister(&macsec_link_ops);
3434 notifier:
3435 	unregister_netdevice_notifier(&macsec_notifier);
3436 	return err;
3437 }
3438 
3439 static void __exit macsec_exit(void)
3440 {
3441 	genl_unregister_family(&macsec_fam);
3442 	rtnl_link_unregister(&macsec_link_ops);
3443 	unregister_netdevice_notifier(&macsec_notifier);
3444 	rcu_barrier();
3445 }
3446 
3447 module_init(macsec_init);
3448 module_exit(macsec_exit);
3449 
3450 MODULE_ALIAS_RTNL_LINK("macsec");
3451 
3452 MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
3453 MODULE_LICENSE("GPL v2");
3454