/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/sock.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};
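/* The gcm(aes) cipher expects a 96-bit nonce (GCM_AES_IV_LEN). MACsec
 * builds it by concatenating the 8-byte SCI of the secure channel with
 * the 4-byte packet number, so every protected frame on a channel uses
 * a unique IV as long as the PN does not repeat. struct gcm_iv above
 * mirrors that layout; macsec_fill_iv() below populates it in place.
 */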
/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @active: flag marking this SA as enabled for receive
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};

struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	atomic_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active: flag marking this SA as enabled for transmit
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	atomic_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active: channel is active
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station: end station bit to set in the SecTAG TCI
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};
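/* All counters are kept per-CPU and paired with a u64_stats_sync
 * seqcount, so 64-bit values can be updated from the hot path without
 * atomics and still be read consistently on 32-bit machines (see the
 * copy_*_stats() helpers further down, which fold the per-CPU copies
 * into a single sum for netlink dumps).
 */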
/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (atomic_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	macsec_rxsc_put(sa->sc);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!atomic_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (atomic_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define DEFAULT_SAK_LEN 16
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}
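/* A Secure Channel Identifier is the 6-byte MAC address of the member
 * sending through the channel followed by a 2-byte port identifier.
 * When the SecTAG does not carry an explicit SCI, the receiver falls
 * back to deriving it from the frame's source address and the
 * end-station port (MACSEC_PORT_ES), which is what the helper below
 * implements.
 */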
static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (tx_sc->send_sci ||
	    (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_MAX_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}
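/* Hand out the next packet number for a TX SA under its spinlock. The
 * PN space of an SA is 32 bits; when it wraps to zero the SA is
 * exhausted and taken out of service, and if the SecY is protecting
 * frames it stops being operational until a fresh SA is installed.
 * Callers treat a returned PN of 0 as "SA exhausted, drop the frame".
 */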
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
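/* Transmit path: prepend the SecTAG (moving the ethernet addresses in
 * front of it), reserve room for the ICV at the tail, and run the AEAD
 * transform in place over the skb. In encrypt mode the payload is
 * encrypted and the ethernet + MACsec headers are authenticated-only
 * (AAD); in integrity-only mode the whole frame is AAD. Completion may
 * be synchronous or come later through macsec_encrypt_done().
 */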
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
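/* Receive-side bookkeeping after the AEAD transform has run (or was
 * skipped): re-check replay protection against the possibly updated
 * lowest acceptable PN, update the per-SC/per-SA counters according to
 * IEEE 802.1AE-2006 figure 10-5, advance next_pn, and tell the caller
 * whether the frame may be delivered.
 */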
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	int len, ret;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	ret = netif_rx(skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	dev_put(dev);
}
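/* Set up and run the AEAD transform for a received frame. Frames with
 * the E bit set carry an encrypted payload, with the ethernet + MACsec
 * headers as AAD; integrity-only frames authenticate the entire frame
 * and only the trailing ICV is passed as crypt data. A return of
 * ERR_PTR(-EINPROGRESS) means the crypto layer went asynchronous and
 * macsec_decrypt_done() will finish the job.
 */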
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	skb_to_sgvec(skb, sg, 0, skb->len);

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->rx_sa = rx_sa;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		int ret;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
}
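/* rx_handler attached to the real device: everything received on the
 * underlying interface goes through here. Untagged frames are handed
 * to handle_not_macsec() and then passed up the stack (uncontrolled
 * port); tagged frames are matched to a SecY/SC by SCI, validated,
 * decrypted if needed, and re-injected on the macsec netdevice
 * (controlled port).
 */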
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb) {
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS)
			macsec_rxsa_put(rx_sa);
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	count_rx(dev, skb->len);

	rcu_read_unlock();

	*pskb = skb;
	return RX_HANDLER_ANOTHER;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		int ret;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
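/* Allocate one AEAD transform per SA. The key is programmed into the
 * tfm immediately and the ICV length is configured as the GCM
 * authentication tag size, so the rest of the code only ever deals
 * with the crypto_aead handle stored in macsec_key.
 */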
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (!tfm || IS_ERR(tfm))
		return NULL;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0) {
		crypto_free_aead(tfm);
		return NULL;
	}

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0) {
		crypto_free_aead(tfm);
		return NULL;
	}

	return tfm;
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -1;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (!rx_sa->key.tfm) {
		free_percpu(rx_sa->stats);
		return -1;
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	atomic_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	atomic_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -1;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (!tx_sa->key.tfm) {
		free_percpu(tx_sa->stats);
		return -1;
	}

	tx_sa->active = false;
	atomic_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam = {
	.id		= GENL_ID_GENERATE,
	.name		= MACSEC_GENL_NAME,
	.hdrsize	= 0,
	.version	= MACSEC_GENL_VERSION,
	.maxattr	= MACSEC_ATTR_MAX,
	.netnsok	= true,
};

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}
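/* SCIs travel over netlink as plain u64 attributes. On the kernel side
 * they are __bitwise sci_t, so the two helpers above centralize the
 * __force casts; nla_put_u64_64bit() also inserts a pad attribute when
 * needed so the 64-bit value stays naturally aligned in the message.
 */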
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG],
			     macsec_genl_sa_policy))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG],
			     macsec_genl_rxsc_policy))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
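/* The genl command handlers below follow a common shape: validate the
 * attributes, take the rtnl lock, resolve ifindex/SCI/AN to the
 * in-kernel objects via the get_*_from_nl() helpers, mutate the
 * configuration, and publish any new SA/SC with rcu_assign_pointer()
 * so the rx/tx fast paths pick it up safely.
 */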
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc) || !macsec_rxsc_get(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
				 secy->key_len, secy->icv_len)) {
		kfree(rx_sa);
		rtnl_unlock();
		return -ENOMEM;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
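/* Unlike an RX SA, a new TX SA must come with an initial packet
 * number: validate_add_txsa() above rejects a missing or zero PN,
 * since the transmit path hands out PNs starting from this value and
 * uses PN 0 as the "SA exhausted" marker.
 */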
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
				 secy->key_len, secy->icv_len)) {
		kfree(tx_sa);
		rtnl_unlock();
		return -ENOMEM;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;
}
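/* As with RX SAs, a TX SA must be deactivated before it can be
 * deleted; deleting an SA that is still active returns -EBUSY.
 */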
static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	rtnl_unlock();

	return 0;
}

static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rtnl_unlock();
	return 0;
}
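/* Toggling an RX SC's active flag keeps n_rx_sc in sync. That count
 * matters beyond accounting: macsec_fill_sectag() includes the SCI in
 * outgoing SecTAGs when more than one receive channel is configured
 * (and neither ES nor SCB is set), so flipping a channel here can
 * change the on-wire header format.
 */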
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	rtnl_unlock();

	return 0;
}

static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}
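/* The per-SC and SecY counters are 64-bit, so each per-CPU copy is
 * snapshotted under its u64_stats_sync seqcount: retry the memcpy
 * until a writer did not interleave, then add the stable snapshot
 * into the sum. The 32-bit per-SA counters above don't need this.
 */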
static int copy_rx_sc_stats(struct sk_buff *skb,
			    struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int copy_tx_sc_stats(struct sk_buff *skb,
			    struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
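
/* SecY-wide counters (untagged, too long, bad tag, unknown/no SCI,
 * overrun), summed per-cpu like the SC stats above.
 */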
static int copy_secy_stats(struct sk_buff *skb,
			   struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);

	if (!secy_nest)
		return 1;

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID,
			      MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
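
/* Emit one netlink message describing a SecY: its attributes, the
 * TX SC and SecY stats, every configured TX SA, and each RX SC with
 * its stats and SAs. List entries are nested attributes numbered
 * from 1.
 */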
static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
		     struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	int i, j;
	void *hdr;
	struct nlattr *attr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr, &macsec_fam);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
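
/* Bumped (under RTNL) whenever a macsec device is created or deleted;
 * used as the dump sequence number so that interrupted dumps can be
 * flagged as inconsistent.
 */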
static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}

static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.dumpit = macsec_dump_txsc,
		.policy = macsec_genl_policy,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.doit = macsec_add_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.doit = macsec_del_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.doit = macsec_upd_rxsc,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.doit = macsec_add_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.doit = macsec_del_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.doit = macsec_upd_txsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.doit = macsec_add_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.doit = macsec_del_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.doit = macsec_upd_rxsa,
		.policy = macsec_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
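
/* Transmit path. With frame protection disabled, frames are passed
 * through to the real device and only accounted as OutPktsUntagged;
 * while the SecY is not operational they are dropped; otherwise they
 * are encrypted and handed to the real device.
 */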
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= real_dev->features & MACSEC_FEATURES;
	features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	return features;
}

static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP))
		return -ENETDOWN;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}
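
/* Propagate allmulti/promiscuous changes to the underlying device
 * while the macsec device is up.
 */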
static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return s;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;

	return s;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};
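
/* Netlink policy for the IFLA_MACSEC_* attributes accepted at link
 * creation and changelink time.
 */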
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	dev_put(real_dev);
	free_netdev(dev);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->destructor = macsec_free_netdev;

	eth_zero_addr(dev->broadcast);
}

static void macsec_changelink_common(struct net_device *dev,
				     struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
}

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[])
{
	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	macsec_changelink_common(dev, data);

	return 0;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}
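
/* Unregister a macsec device and remove it from the real device's
 * list of SecYs; once the last SecY is gone, the rx_handler and its
 * data are released as well.
 */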
static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_generation++;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}

	macsec_del_dev(macsec);
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}
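
/* Create a macsec device stacked on the IFLA_LINK device. The netdev
 * is registered before the SCI is derived, because the MAC address
 * used by dev_to_sci() is only inherited from the real device in
 * ->ndo_init().
 */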
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unregister;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unregister;

	if (data)
		macsec_changelink_common(dev, data);

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	macsec_generation++;

	dev_hold(real_dev);

	return 0;

del_dev:
	macsec_del_dev(macsec);
unregister:
	unregister_netdevice(dev);
	return err;
}
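
/* Validate the IFLA_MACSEC_* attributes: only the default cipher
 * suite is accepted, the ICV length must be within bounds, the ES,
 * SCB and SCI bits of the TCI are mutually exclusive, and enabling
 * replay protection requires an explicit replay window.
 */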
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);

	switch (csid) {
	case MACSEC_DEFAULT_CIPHER_ID:
	case MACSEC_DEFAULT_CIPHER_ALT:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_MAX_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return 0 +
		nla_total_size_64bit(8) + /* SCI */
		nla_total_size(1) + /* ICV_LEN */
		nla_total_size_64bit(8) + /* CIPHER_SUITE */
		nla_total_size(4) + /* WINDOW */
		nla_total_size(1) + /* ENCODING_SA */
		nla_total_size(1) + /* ENCRYPT */
		nla_total_size(1) + /* PROTECT */
		nla_total_size(1) + /* INC_SCI */
		nla_total_size(1) + /* ES */
		nla_total_size(1) + /* SCB */
		nla_total_size(1) + /* REPLAY_PROTECT */
		nla_total_size(1) + /* VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
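
/* Notifier for events on the underlying device: tear down every
 * macsec device stacked on it when it unregisters, and clamp their
 * MTU when its own MTU no longer leaves room for the SecTAG and ICV.
 */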
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_dellink(m->secy.netdev, &head);
		}
		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family_with_ops(&macsec_fam, macsec_genl_ops);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");