/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)
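
/* On-wire SecTAG layout, matching struct macsec_eth_header above (the
 * EtherType precedes the fixed 6-octet tag, the optional SCI follows it):
 *
 *	EtherType (0x88e5)	2 octets
 *	TCI/AN			1 octet  (V/ES/SC/SCB/E/C flags + AN)
 *	Short Length		1 octet  (low 6 bits used)
 *	Packet Number		4 octets
 *	SCI			8 octets (only when MACSEC_TCI_SC is set)
 */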

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};
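
/* The GCM IV used throughout this driver is the 64-bit SCI followed by
 * the 32-bit packet number, 12 bytes total (GCM_AES_IV_LEN); see
 * macsec_fill_iv().
 */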

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @active: SA is usable for reception
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	atomic_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active: SA is usable for transmission
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active: channel is in use
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station: set the ES (end station) bit in the SecTAG
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	unsigned int nest_level;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (atomic_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define DEFAULT_SAK_LEN 16
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}
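
/* When send_sci() is false the SCI is omitted from the SecTAG and the
 * receiver reconstructs it from the source MAC address and the default
 * port number; see make_sci() and macsec_frame_sci() below.
 */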

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
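
/* A return value of 0 from tx_sa_update_pn() means the 32-bit packet
 * number space is exhausted: the SA was just deactivated above, and the
 * caller (macsec_encrypt()) treats 0 as "drop the frame".
 */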

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
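
/* macsec_alloc_req() above packs everything one crypto operation needs
 * into a single GFP_ATOMIC allocation: the AEAD request itself (plus the
 * transform's request context), the 12-byte IV, and a scatterlist array
 * sized for MAX_SKB_FRAGS + 1 entries, aligned for struct scatterlist.
 */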

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len, ret;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		int ret;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
}

static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb) {
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}
1165 */ 1166 u64_stats_update_begin(&rxsc_stats->syncp); 1167 rxsc_stats->stats.InPktsUnusedSA++; 1168 u64_stats_update_end(&rxsc_stats->syncp); 1169 goto deliver; 1170 } 1171 1172 /* First, PN check to avoid decrypting obviously wrong packets */ 1173 pn = ntohl(hdr->packet_number); 1174 if (secy->replay_protect) { 1175 bool late; 1176 1177 spin_lock(&rx_sa->lock); 1178 late = rx_sa->next_pn >= secy->replay_window && 1179 pn < (rx_sa->next_pn - secy->replay_window); 1180 spin_unlock(&rx_sa->lock); 1181 1182 if (late) { 1183 u64_stats_update_begin(&rxsc_stats->syncp); 1184 rxsc_stats->stats.InPktsLate++; 1185 u64_stats_update_end(&rxsc_stats->syncp); 1186 goto drop; 1187 } 1188 } 1189 1190 macsec_skb_cb(skb)->rx_sa = rx_sa; 1191 1192 /* Disabled && !changed text => skip validation */ 1193 if (hdr->tci_an & MACSEC_TCI_C || 1194 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1195 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1196 1197 if (IS_ERR(skb)) { 1198 /* the decrypt callback needs the reference */ 1199 if (PTR_ERR(skb) != -EINPROGRESS) { 1200 macsec_rxsa_put(rx_sa); 1201 macsec_rxsc_put(rx_sc); 1202 } 1203 rcu_read_unlock(); 1204 *pskb = NULL; 1205 return RX_HANDLER_CONSUMED; 1206 } 1207 1208 if (!macsec_post_decrypt(skb, secy, pn)) 1209 goto drop; 1210 1211 deliver: 1212 macsec_finalize_skb(skb, secy->icv_len, 1213 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1214 macsec_reset_skb(skb, secy->netdev); 1215 1216 if (rx_sa) 1217 macsec_rxsa_put(rx_sa); 1218 macsec_rxsc_put(rx_sc); 1219 1220 ret = gro_cells_receive(&macsec->gro_cells, skb); 1221 if (ret == NET_RX_SUCCESS) 1222 count_rx(dev, skb->len); 1223 else 1224 macsec->secy.netdev->stats.rx_dropped++; 1225 1226 rcu_read_unlock(); 1227 1228 *pskb = NULL; 1229 return RX_HANDLER_CONSUMED; 1230 1231 drop: 1232 macsec_rxsa_put(rx_sa); 1233 drop_nosa: 1234 macsec_rxsc_put(rx_sc); 1235 rcu_read_unlock(); 1236 drop_direct: 1237 kfree_skb(skb); 1238 *pskb = NULL; 1239 return RX_HANDLER_CONSUMED; 1240 1241 nosci: 1242 /* 10.6.1 if the SC is not found */ 1243 cbit = !!(hdr->tci_an & MACSEC_TCI_C); 1244 if (!cbit) 1245 macsec_finalize_skb(skb, DEFAULT_ICV_LEN, 1246 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1247 1248 list_for_each_entry_rcu(macsec, &rxd->secys, secys) { 1249 struct sk_buff *nskb; 1250 1251 secy_stats = this_cpu_ptr(macsec->stats); 1252 1253 /* If validateFrames is Strict or the C bit in the 1254 * SecTAG is set, discard 1255 */ 1256 if (cbit || 1257 macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { 1258 u64_stats_update_begin(&secy_stats->syncp); 1259 secy_stats->stats.InPktsNoSCI++; 1260 u64_stats_update_end(&secy_stats->syncp); 1261 continue; 1262 } 1263 1264 /* not strict, the frame (with the SecTAG and ICV 1265 * removed) is delivered to the Controlled Port. 
1266 */ 1267 nskb = skb_clone(skb, GFP_ATOMIC); 1268 if (!nskb) 1269 break; 1270 1271 macsec_reset_skb(nskb, macsec->secy.netdev); 1272 1273 ret = netif_rx(nskb); 1274 if (ret == NET_RX_SUCCESS) { 1275 u64_stats_update_begin(&secy_stats->syncp); 1276 secy_stats->stats.InPktsUnknownSCI++; 1277 u64_stats_update_end(&secy_stats->syncp); 1278 } else { 1279 macsec->secy.netdev->stats.rx_dropped++; 1280 } 1281 } 1282 1283 rcu_read_unlock(); 1284 *pskb = skb; 1285 return RX_HANDLER_PASS; 1286 } 1287 1288 static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) 1289 { 1290 struct crypto_aead *tfm; 1291 int ret; 1292 1293 tfm = crypto_alloc_aead("gcm(aes)", 0, 0); 1294 1295 if (IS_ERR(tfm)) 1296 return tfm; 1297 1298 ret = crypto_aead_setkey(tfm, key, key_len); 1299 if (ret < 0) 1300 goto fail; 1301 1302 ret = crypto_aead_setauthsize(tfm, icv_len); 1303 if (ret < 0) 1304 goto fail; 1305 1306 return tfm; 1307 fail: 1308 crypto_free_aead(tfm); 1309 return ERR_PTR(ret); 1310 } 1311 1312 static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, 1313 int icv_len) 1314 { 1315 rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats); 1316 if (!rx_sa->stats) 1317 return -ENOMEM; 1318 1319 rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); 1320 if (IS_ERR(rx_sa->key.tfm)) { 1321 free_percpu(rx_sa->stats); 1322 return PTR_ERR(rx_sa->key.tfm); 1323 } 1324 1325 rx_sa->active = false; 1326 rx_sa->next_pn = 1; 1327 atomic_set(&rx_sa->refcnt, 1); 1328 spin_lock_init(&rx_sa->lock); 1329 1330 return 0; 1331 } 1332 1333 static void clear_rx_sa(struct macsec_rx_sa *rx_sa) 1334 { 1335 rx_sa->active = false; 1336 1337 macsec_rxsa_put(rx_sa); 1338 } 1339 1340 static void free_rx_sc(struct macsec_rx_sc *rx_sc) 1341 { 1342 int i; 1343 1344 for (i = 0; i < MACSEC_NUM_AN; i++) { 1345 struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]); 1346 1347 RCU_INIT_POINTER(rx_sc->sa[i], NULL); 1348 if (sa) 1349 clear_rx_sa(sa); 1350 } 1351 1352 macsec_rxsc_put(rx_sc); 1353 } 1354 1355 static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) 1356 { 1357 struct macsec_rx_sc *rx_sc, __rcu **rx_scp; 1358 1359 for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp); 1360 rx_sc; 1361 rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) { 1362 if (rx_sc->sci == sci) { 1363 if (rx_sc->active) 1364 secy->n_rx_sc--; 1365 rcu_assign_pointer(*rx_scp, rx_sc->next); 1366 return rx_sc; 1367 } 1368 } 1369 1370 return NULL; 1371 } 1372 1373 static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) 1374 { 1375 struct macsec_rx_sc *rx_sc; 1376 struct macsec_dev *macsec; 1377 struct net_device *real_dev = macsec_priv(dev)->real_dev; 1378 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 1379 struct macsec_secy *secy; 1380 1381 list_for_each_entry(macsec, &rxd->secys, secys) { 1382 if (find_rx_sc_rtnl(&macsec->secy, sci)) 1383 return ERR_PTR(-EEXIST); 1384 } 1385 1386 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL); 1387 if (!rx_sc) 1388 return ERR_PTR(-ENOMEM); 1389 1390 rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats); 1391 if (!rx_sc->stats) { 1392 kfree(rx_sc); 1393 return ERR_PTR(-ENOMEM); 1394 } 1395 1396 rx_sc->sci = sci; 1397 rx_sc->active = true; 1398 atomic_set(&rx_sc->refcnt, 1); 1399 1400 secy = &macsec_priv(dev)->secy; 1401 rcu_assign_pointer(rx_sc->next, secy->rx_sc); 1402 rcu_assign_pointer(secy->rx_sc, rx_sc); 1403 1404 if (rx_sc->active) 1405 secy->n_rx_sc++; 1406 1407 return rx_sc; 1408 } 1409 1410 static int 

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	atomic_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	atomic_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	atomic_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG],
			     macsec_genl_sa_policy))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG],
			     macsec_genl_rxsc_policy))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
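
/* Note the asymmetry with the add path above: an update may toggle
 * "active" and move the PN, but MACSEC_SA_ATTR_KEY/MACSEC_SA_ATTR_KEYID
 * are rejected by validate_upd_sa(), so changing a key apparently
 * requires deleting and re-adding the SA.
 */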

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	rtnl_unlock();

	return 0;
}

static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rtnl_unlock();
	return 0;
}

static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	rtnl_unlock();

	return 0;
}
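
/* The copy_*_stats() helpers below fold per-cpu counters into a single
 * sum for netlink dumps. The 64-bit SC/SecY counters are read under the
 * u64_stats_fetch_begin_irq()/retry seqcount loop so 32-bit hosts see
 * consistent values; the 32-bit per-SA counters are summed directly.
 */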
static int copy_rx_sc_stats(struct sk_buff *skb,
			    struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int copy_tx_sc_stats(struct sk_buff *skb,
			    struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
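
/* SecY-wide counters: frames that never reached an SA or SC (untagged,
 * badly tagged, unknown or absent SCI, overruns), summed per-CPU exactly
 * like the SC counters above.
 */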
static int copy_secy_stats(struct sk_buff *skb,
			   struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);

	if (!secy_nest)
		return 1;

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID,
			      MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
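
/* Emit one MACSEC_CMD_GET_TXSC message describing @secy. The message
 * nests, in order: MACSEC_ATTR_IFINDEX, the SecY configuration, TX SC and
 * SecY stats, the TX SA list (nest indices starting at 1), and the RX SC
 * list, where each RX SC carries its own stats and SA list. Every error
 * path unwinds the partially written nests before cancelling the message.
 */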
static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
		     struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	int i, j;
	void *hdr;
	struct nlattr *attr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr, &macsec_fam);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
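
/* Incremented under RTNL each time a macsec device is created or
 * destroyed; macsec_dump_txsc() publishes it as cb->seq so that
 * genl_dump_check_consistent() can mark multipart dumps that raced
 * with a configuration change.
 */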
static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}

static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.dumpit = macsec_dump_txsc,
		.policy = macsec_genl_policy,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.doit = macsec_add_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.doit = macsec_del_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.doit = macsec_upd_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.doit = macsec_add_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.doit = macsec_del_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.doit = macsec_upd_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.doit = macsec_add_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.doit = macsec_del_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.doit = macsec_upd_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family macsec_fam __ro_after_init = {
	.name = MACSEC_GENL_NAME,
	.hdrsize = 0,
	.version = MACSEC_GENL_VERSION,
	.maxattr = MACSEC_ATTR_MAX,
	.netnsok = true,
	.module = THIS_MODULE,
	.ops = macsec_genl_ops,
	.n_ops = ARRAY_SIZE(macsec_genl_ops),
};
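
/* Transmit path. Three cases (the bare "10.5" below is a clause
 * reference to IEEE 802.1AE): protection disabled -> count the frame as
 * OutPktsUntagged and pass it to the real device unmodified; SecY not
 * operational -> drop; otherwise protect the frame (macsec_encrypt() may
 * return -EINPROGRESS if the crypto callback completes asynchronously)
 * and queue it on the underlying device.
 */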
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
static struct lock_class_key macsec_netdev_addr_lock_key;

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}
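
/* ndo_open: the macsec device can only be brought up while the underlying
 * device is up. Its unicast address and any allmulti/promisc state are
 * propagated down to the real device, and carrier state is inherited
 * from it; macsec_dev_stop() undoes all of this.
 */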
static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP))
		return -ENETDOWN;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static int macsec_get_nest_level(struct net_device *dev)
{
	return macsec_priv(dev)->nest_level;
}
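
/* All of these callbacks either delegate to the underlying device or
 * operate on macsec_dev (netdev_priv). TX is marked lockless
 * (NETIF_F_LLTX in macsec_dev_init()/macsec_fix_features()), with stats
 * kept in per-CPU structures instead.
 */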
static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
	.ndo_get_lock_subclass	= macsec_get_nest_level,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	dev_put(real_dev);
	free_netdev(dev);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

static void macsec_changelink_common(struct net_device *dev,
				     struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
}
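
/* changelink may update the runtime SecY/TX SC flags through
 * macsec_changelink_common(), but attributes that define the device's
 * identity (cipher suite, ICV length, SCI, port) are fixed at newlink
 * time and are rejected here with -EINVAL.
 */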
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[])
{
	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	macsec_changelink_common(dev, data);

	return 0;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}
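
/* Allocate the per-CPU stats and apply the SecY defaults: operational,
 * protecting frames, validate_frames = MACSEC_VALIDATE_DEFAULT, replay
 * protection off. If the caller passed MACSEC_UNDEF_SCI, the SCI is
 * derived from the device MAC address and the end-station port
 * (dev_to_sci(dev, MACSEC_PORT_ES)).
 */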
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	dev_hold(real_dev);

	macsec->nest_level = dev_get_nest_level(real_dev) + 1;
	netdev_lockdep_set_classes(dev);
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &macsec_netdev_addr_lock_key,
				       macsec_get_nest_level(dev));

	err = netdev_upper_dev_link(real_dev, dev);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data)
		macsec_changelink_common(dev, data);

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}
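
/* rtnl ->validate hook, run before newlink/changelink. Checks that the
 * cipher suite and ICV length are usable (probing the crypto layer for
 * non-default ICV lengths), that the boolean u8 attributes are 0 or 1
 * (relying on the IFLA_MACSEC_* enum keeping them contiguous between
 * ENCODING_SA and VALIDATION), that at most one of INC_SCI/ES/SCB is
 * set, and that a replay window accompanies replay protection. A typical
 * iproute2 command exercising this path (syntax from current iproute2,
 * given here only as an example):
 *   ip link add link eth0 macsec0 type macsec icv_len 16 encrypt on
 */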
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_DEFAULT_CIPHER_ID:
	case MACSEC_DEFAULT_CIPHER_ALT:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};
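
/* A real device is a "macsec master" once at least one macsec device has
 * been stacked on it, which register_macsec_dev() records by installing
 * macsec_handle_frame as its rx_handler; rcu_access_pointer() suffices
 * here since only pointer equality is tested.
 */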
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");