// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>

#include <uapi/linux/if_macsec.h>

typedef u64 __bitwise sci_t;

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define MACSEC_NUM_AN 4 /* 2 bits for the association number */

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

/**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
 */
struct macsec_key {
	u8 id[MACSEC_KEYID_LEN];
	struct crypto_aead *tfm;
};

struct macsec_rx_sc_stats {
	__u64 InOctetsValidated;
	__u64 InOctetsDecrypted;
	__u64 InPktsUnchecked;
	__u64 InPktsDelayed;
	__u64 InPktsOK;
	__u64 InPktsInvalid;
	__u64 InPktsLate;
	__u64 InPktsNotValid;
	__u64 InPktsNotUsingSA;
	__u64 InPktsUnusedSA;
};

struct macsec_rx_sa_stats {
	__u32 InPktsOK;
	__u32 InPktsInvalid;
	__u32 InPktsNotValid;
	__u32 InPktsNotUsingSA;
	__u32 InPktsUnusedSA;
};

struct macsec_tx_sa_stats {
	__u32 OutPktsProtected;
	__u32 OutPktsEncrypted;
};

struct macsec_tx_sc_stats {
	__u64 OutPktsProtected;
	__u64 OutPktsEncrypted;
	__u64 OutOctetsProtected;
	__u64 OutOctetsEncrypted;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

/**
 * struct macsec_rx_sa - receive secure association
 * @active: SA is in use and may receive traffic
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_rx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_rx_sa_stats __percpu *stats;
	struct macsec_rx_sc *sc;
	struct rcu_head rcu;
};
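/* SA and SC objects are refcounted and RCU-protected: readers look
 * them up under rcu_read_lock_bh() and take a reference with
 * refcount_inc_not_zero() (see macsec_rxsa_get() below); the final
 * reference drop frees them through call_rcu().
 */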
struct pcpu_rx_sc_stats {
	struct macsec_rx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_rx_sc - receive secure channel
 * @sci: secure channel identifier for this SC
 * @active: channel is active
 * @sa: array of secure associations
 * @stats: per-SC stats
 */
struct macsec_rx_sc {
	struct macsec_rx_sc __rcu *next;
	sci_t sci;
	bool active;
	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_rx_sc_stats __percpu *stats;
	refcount_t refcnt;
	struct rcu_head rcu_head;
};

/**
 * struct macsec_tx_sa - transmit secure association
 * @active: SA is in use and may transmit
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
 * @stats: per-SA stats
 */
struct macsec_tx_sa {
	struct macsec_key key;
	spinlock_t lock;
	u32 next_pn;
	refcount_t refcnt;
	bool active;
	struct macsec_tx_sa_stats __percpu *stats;
	struct rcu_head rcu;
};

struct pcpu_tx_sc_stats {
	struct macsec_tx_sc_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_tx_sc - transmit secure channel
 * @active: channel is in use
 * @encoding_sa: association number of the SA currently in use
 * @encrypt: encrypt packets on transmit, or authenticate only
 * @send_sci: always include the SCI in the SecTAG
 * @end_station: set the ES bit in the SecTAG
 * @scb: single copy broadcast flag
 * @sa: array of secure associations
 * @stats: stats for this TXSC
 */
struct macsec_tx_sc {
	bool active;
	u8 encoding_sa;
	bool encrypt;
	bool send_sci;
	bool end_station;
	bool scb;
	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
	struct pcpu_tx_sc_stats __percpu *stats;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

/**
 * struct macsec_secy - MACsec Security Entity
 * @netdev: netdevice for this SecY
 * @n_rx_sc: number of receive secure channels configured on this SecY
 * @sci: secure channel identifier used for tx
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
 * @replay_window: size of the replay window
 * @tx_sc: transmit secure channel
 * @rx_sc: linked list of receive secure channels
 */
struct macsec_secy {
	struct net_device *netdev;
	unsigned int n_rx_sc;
	sci_t sci;
	u16 key_len;
	u16 icv_len;
	enum macsec_validation_type validate_frames;
	bool operational;
	bool protect_frames;
	bool replay_protect;
	u32 replay_window;
	struct macsec_tx_sc tx_sc;
	struct macsec_rx_sc __rcu *rx_sc;
};

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	unsigned int nest_level;
};
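/* struct macsec_dev is the netdev_priv() of a macsec netdevice; all
 * macsec devices stacked on the same real device are linked together
 * through @secys in the rx_handler's struct macsec_rxh_data below.
 */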
/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}
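/* The 64-bit SCI is the station's 6-byte MAC address followed by a
 * 2-byte port identifier, both kept in wire order, so the value can be
 * copied verbatim into (and out of) the optional SecTAG SCI field.
 */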
static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 (figure 10-5) */
	if (!h->packet_number)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}
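/* Reserve the PN for one frame. A PN of 0 is never transmitted: when
 * next_pn wraps around to 0 the SA is exhausted, so it is deactivated
 * and, if this SecY protects frames, the SecY stops being operational;
 * callers treat a returned PN of 0 as a transmit failure.
 */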
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
{
	u32 pn;

	spin_lock_bh(&tx_sa->lock);
	pn = tx_sa->next_pn;

	tx_sa->next_pn++;
	if (tx_sa->next_pn == 0) {
		pr_debug("PN wrapped, transitioning to !oper\n");
		tx_sa->active = false;
		if (secy->protect_frames)
			secy->operational = false;
	}
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
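/* Transmit path: grab the encoding SA, make sure the skb has room for
 * the SecTAG in front and the ICV at the end, fill in the SecTAG, then
 * hand the whole frame to GCM. On -EINPROGRESS the crypto layer
 * finishes asynchronously in macsec_encrypt_done().
 */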
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	u32 pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	macsec_fill_iv(iv, secy->sci, pn);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
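/* Post-decrypt bookkeeping (IEEE 802.1AE-2006 figure 10-5): re-check
 * the replay window now that the PN is authenticated, update the
 * per-SC/per-SA counters, and advance next_pn for frames that passed
 * validation.
 */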
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn >= secy->replay_window)
		lowest_pn = rx_sa->next_pn - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		if (pn >= rx_sa->next_pn)
			rx_sa->next_pn = pn + 1;
		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
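/* Decrypt/verify one frame. With the E bit set, the ethernet and
 * MACsec headers are the associated data and the payload is decrypted
 * in place; for integrity-only frames everything up to the ICV is
 * associated data and only the ICV is consumed by the AEAD.
 */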
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static void handle_not_macsec(struct sk_buff *skb)
{
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

	rcu_read_unlock();
}
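/* rx_handler installed on the real device: untagged frames go through
 * handle_not_macsec(), tagged frames are matched to a SecY/RXSC by
 * SCI, decrypted if needed, and re-injected on the matching macsec
 * device.
 */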
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	sci_t sci;
	u32 pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
		handle_not_macsec(skb);

		/* and deliver to the uncontrolled port */
		return RX_HANDLER_PASS;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

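	/* Find the SecY on this real device that owns an RXSC with the
	 * frame's SCI, and take a reference on the RXSC so it outlives
	 * this RCU read section (the async decrypt path needs it).
	 */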
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn >= secy->replay_window &&
		       pn < (rx_sa->next_pn - secy->replay_window);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, skb->len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
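/* Allocate a gcm(aes) transform for one SA and program its key and
 * ICV (auth tag) length. The ICV length is configurable per SecY,
 * which is why it is set here rather than fixed by the cipher.
 */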
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}
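/* Create an RXSC and link it at the head of the SecY's list. An SCI
 * must be unique across every SecY stacked on the same real device,
 * hence the scan over rxd->secys before allocating.
 */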
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}
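/* All of these get_*_from_nl() lookup helpers (and the genl doit
 * handlers below) run with the RTNL held, which is what makes the
 * rtnl_dereference() calls on SA/SC pointers safe.
 */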
static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
};

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
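/* genl doit handler: parse and validate the nested SA/RXSC attributes,
 * then create and initialize the RX SA under the RTNL and publish it
 * with rcu_assign_pointer() once fully set up.
 */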
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rx_sa->sc = rx_sc;
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rtnl_unlock();

	return 0;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
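/* Unlike RX SAs (where next_pn defaults to 1), a TX SA requires an
 * explicit, non-zero initial PN: validate_add_txsa() above makes
 * MACSEC_SA_ATTR_PN mandatory.
 */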
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;
}

static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;
}
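/* Deleting an RXSC unlinks it from the SecY and releases all of its
 * SAs via free_rx_sc(); individual SAs, by contrast, can only be
 * deleted once they have been deactivated (-EBUSY above and in
 * macsec_del_txsa() below).
 */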
static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;
}

static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	rtnl_unlock();

	return 0;
}
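/* SA updates may only touch the PN and the active flag;
 * validate_upd_sa() above rejects any attempt to replace the key
 * material of an existing SA.
 */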
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rtnl_unlock();
	return 0;
}

static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	rtnl_unlock();

	return 0;
}

static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_rx_sa_stats(struct sk_buff *skb,
		 struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}
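/* The per-SA counters above are u32 and can be summed directly; the
 * per-SC and per-SecY counters below are u64, so each per-cpu copy is
 * snapshotted under the u64_stats seqcount (and retried if a writer
 * raced with us) to stay consistent on 32-bit hosts too.
 */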
u64_stats_fetch_begin_irq(&stats->syncp); 2212 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2213 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2214 2215 sum.InOctetsValidated += tmp.InOctetsValidated; 2216 sum.InOctetsDecrypted += tmp.InOctetsDecrypted; 2217 sum.InPktsUnchecked += tmp.InPktsUnchecked; 2218 sum.InPktsDelayed += tmp.InPktsDelayed; 2219 sum.InPktsOK += tmp.InPktsOK; 2220 sum.InPktsInvalid += tmp.InPktsInvalid; 2221 sum.InPktsLate += tmp.InPktsLate; 2222 sum.InPktsNotValid += tmp.InPktsNotValid; 2223 sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA; 2224 sum.InPktsUnusedSA += tmp.InPktsUnusedSA; 2225 } 2226 2227 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, 2228 sum.InOctetsValidated, 2229 MACSEC_RXSC_STATS_ATTR_PAD) || 2230 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, 2231 sum.InOctetsDecrypted, 2232 MACSEC_RXSC_STATS_ATTR_PAD) || 2233 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, 2234 sum.InPktsUnchecked, 2235 MACSEC_RXSC_STATS_ATTR_PAD) || 2236 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, 2237 sum.InPktsDelayed, 2238 MACSEC_RXSC_STATS_ATTR_PAD) || 2239 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, 2240 sum.InPktsOK, 2241 MACSEC_RXSC_STATS_ATTR_PAD) || 2242 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, 2243 sum.InPktsInvalid, 2244 MACSEC_RXSC_STATS_ATTR_PAD) || 2245 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, 2246 sum.InPktsLate, 2247 MACSEC_RXSC_STATS_ATTR_PAD) || 2248 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, 2249 sum.InPktsNotValid, 2250 MACSEC_RXSC_STATS_ATTR_PAD) || 2251 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2252 sum.InPktsNotUsingSA, 2253 MACSEC_RXSC_STATS_ATTR_PAD) || 2254 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, 2255 sum.InPktsUnusedSA, 2256 MACSEC_RXSC_STATS_ATTR_PAD)) 2257 return -EMSGSIZE; 2258 2259 return 0; 2260 } 2261 2262 static noinline_for_stack int 2263 copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats) 2264 { 2265 struct macsec_tx_sc_stats sum = {0, }; 2266 int cpu; 2267 2268 for_each_possible_cpu(cpu) { 2269 const struct pcpu_tx_sc_stats *stats; 2270 struct macsec_tx_sc_stats tmp; 2271 unsigned int start; 2272 2273 stats = per_cpu_ptr(pstats, cpu); 2274 do { 2275 start = u64_stats_fetch_begin_irq(&stats->syncp); 2276 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2277 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2278 2279 sum.OutPktsProtected += tmp.OutPktsProtected; 2280 sum.OutPktsEncrypted += tmp.OutPktsEncrypted; 2281 sum.OutOctetsProtected += tmp.OutOctetsProtected; 2282 sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted; 2283 } 2284 2285 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, 2286 sum.OutPktsProtected, 2287 MACSEC_TXSC_STATS_ATTR_PAD) || 2288 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2289 sum.OutPktsEncrypted, 2290 MACSEC_TXSC_STATS_ATTR_PAD) || 2291 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, 2292 sum.OutOctetsProtected, 2293 MACSEC_TXSC_STATS_ATTR_PAD) || 2294 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, 2295 sum.OutOctetsEncrypted, 2296 MACSEC_TXSC_STATS_ATTR_PAD)) 2297 return -EMSGSIZE; 2298 2299 return 0; 2300 } 2301 2302 static noinline_for_stack int 2303 copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats) 2304 { 2305 struct macsec_dev_stats sum = {0, }; 2306 int cpu; 2307 
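	/* Fold the per-CPU SecY counters into a single sum.  Each per-CPU
	 * copy is snapshotted under its u64_stats seqcount so that the
	 * 64-bit counters read back consistently even on 32-bit SMP, where
	 * a plain load could otherwise tear between the two halves of a
	 * concurrent update.  The writer side pairs with this pattern, as
	 * in macsec_start_xmit() below:
	 *
	 *	u64_stats_update_begin(&secy_stats->syncp);
	 *	secy_stats->stats.OutPktsUntagged++;
	 *	u64_stats_update_end(&secy_stats->syncp);
	 */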
2308 for_each_possible_cpu(cpu) { 2309 const struct pcpu_secy_stats *stats; 2310 struct macsec_dev_stats tmp; 2311 unsigned int start; 2312 2313 stats = per_cpu_ptr(pstats, cpu); 2314 do { 2315 start = u64_stats_fetch_begin_irq(&stats->syncp); 2316 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2317 } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 2318 2319 sum.OutPktsUntagged += tmp.OutPktsUntagged; 2320 sum.InPktsUntagged += tmp.InPktsUntagged; 2321 sum.OutPktsTooLong += tmp.OutPktsTooLong; 2322 sum.InPktsNoTag += tmp.InPktsNoTag; 2323 sum.InPktsBadTag += tmp.InPktsBadTag; 2324 sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI; 2325 sum.InPktsNoSCI += tmp.InPktsNoSCI; 2326 sum.InPktsOverrun += tmp.InPktsOverrun; 2327 } 2328 2329 if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, 2330 sum.OutPktsUntagged, 2331 MACSEC_SECY_STATS_ATTR_PAD) || 2332 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, 2333 sum.InPktsUntagged, 2334 MACSEC_SECY_STATS_ATTR_PAD) || 2335 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, 2336 sum.OutPktsTooLong, 2337 MACSEC_SECY_STATS_ATTR_PAD) || 2338 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, 2339 sum.InPktsNoTag, 2340 MACSEC_SECY_STATS_ATTR_PAD) || 2341 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, 2342 sum.InPktsBadTag, 2343 MACSEC_SECY_STATS_ATTR_PAD) || 2344 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, 2345 sum.InPktsUnknownSCI, 2346 MACSEC_SECY_STATS_ATTR_PAD) || 2347 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, 2348 sum.InPktsNoSCI, 2349 MACSEC_SECY_STATS_ATTR_PAD) || 2350 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, 2351 sum.InPktsOverrun, 2352 MACSEC_SECY_STATS_ATTR_PAD)) 2353 return -EMSGSIZE; 2354 2355 return 0; 2356 } 2357 2358 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) 2359 { 2360 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2361 struct nlattr *secy_nest = nla_nest_start_noflag(skb, 2362 MACSEC_ATTR_SECY); 2363 u64 csid; 2364 2365 if (!secy_nest) 2366 return 1; 2367 2368 switch (secy->key_len) { 2369 case MACSEC_GCM_AES_128_SAK_LEN: 2370 csid = MACSEC_DEFAULT_CIPHER_ID; 2371 break; 2372 case MACSEC_GCM_AES_256_SAK_LEN: 2373 csid = MACSEC_CIPHER_ID_GCM_AES_256; 2374 break; 2375 default: 2376 goto cancel; 2377 } 2378 2379 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, 2380 MACSEC_SECY_ATTR_PAD) || 2381 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 2382 csid, MACSEC_SECY_ATTR_PAD) || 2383 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 2384 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 2385 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 2386 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || 2387 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || 2388 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || 2389 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || 2390 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || 2391 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || 2392 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) 2393 goto cancel; 2394 2395 if (secy->replay_protect) { 2396 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) 2397 goto cancel; 2398 } 2399 2400 nla_nest_end(skb, secy_nest); 2401 return 0; 2402 2403 cancel: 2404 nla_nest_cancel(skb, secy_nest); 2405 return 1; 2406 } 2407 2408 static noinline_for_stack int 2409 
dump_secy(struct macsec_secy *secy, struct net_device *dev, 2410 struct sk_buff *skb, struct netlink_callback *cb) 2411 { 2412 struct macsec_rx_sc *rx_sc; 2413 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2414 struct nlattr *txsa_list, *rxsc_list; 2415 int i, j; 2416 void *hdr; 2417 struct nlattr *attr; 2418 2419 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 2420 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); 2421 if (!hdr) 2422 return -EMSGSIZE; 2423 2424 genl_dump_check_consistent(cb, hdr); 2425 2426 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 2427 goto nla_put_failure; 2428 2429 if (nla_put_secy(secy, skb)) 2430 goto nla_put_failure; 2431 2432 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); 2433 if (!attr) 2434 goto nla_put_failure; 2435 if (copy_tx_sc_stats(skb, tx_sc->stats)) { 2436 nla_nest_cancel(skb, attr); 2437 goto nla_put_failure; 2438 } 2439 nla_nest_end(skb, attr); 2440 2441 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); 2442 if (!attr) 2443 goto nla_put_failure; 2444 if (copy_secy_stats(skb, macsec_priv(dev)->stats)) { 2445 nla_nest_cancel(skb, attr); 2446 goto nla_put_failure; 2447 } 2448 nla_nest_end(skb, attr); 2449 2450 txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST); 2451 if (!txsa_list) 2452 goto nla_put_failure; 2453 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { 2454 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); 2455 struct nlattr *txsa_nest; 2456 2457 if (!tx_sa) 2458 continue; 2459 2460 txsa_nest = nla_nest_start_noflag(skb, j++); 2461 if (!txsa_nest) { 2462 nla_nest_cancel(skb, txsa_list); 2463 goto nla_put_failure; 2464 } 2465 2466 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2467 nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) || 2468 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || 2469 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 2470 nla_nest_cancel(skb, txsa_nest); 2471 nla_nest_cancel(skb, txsa_list); 2472 goto nla_put_failure; 2473 } 2474 2475 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); 2476 if (!attr) { 2477 nla_nest_cancel(skb, txsa_nest); 2478 nla_nest_cancel(skb, txsa_list); 2479 goto nla_put_failure; 2480 } 2481 if (copy_tx_sa_stats(skb, tx_sa->stats)) { 2482 nla_nest_cancel(skb, attr); 2483 nla_nest_cancel(skb, txsa_nest); 2484 nla_nest_cancel(skb, txsa_list); 2485 goto nla_put_failure; 2486 } 2487 nla_nest_end(skb, attr); 2488 2489 nla_nest_end(skb, txsa_nest); 2490 } 2491 nla_nest_end(skb, txsa_list); 2492 2493 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); 2494 if (!rxsc_list) 2495 goto nla_put_failure; 2496 2497 j = 1; 2498 for_each_rxsc_rtnl(secy, rx_sc) { 2499 int k; 2500 struct nlattr *rxsa_list; 2501 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); 2502 2503 if (!rxsc_nest) { 2504 nla_nest_cancel(skb, rxsc_list); 2505 goto nla_put_failure; 2506 } 2507 2508 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || 2509 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, 2510 MACSEC_RXSC_ATTR_PAD)) { 2511 nla_nest_cancel(skb, rxsc_nest); 2512 nla_nest_cancel(skb, rxsc_list); 2513 goto nla_put_failure; 2514 } 2515 2516 attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS); 2517 if (!attr) { 2518 nla_nest_cancel(skb, rxsc_nest); 2519 nla_nest_cancel(skb, rxsc_list); 2520 goto nla_put_failure; 2521 } 2522 if (copy_rx_sc_stats(skb, rx_sc->stats)) { 2523 nla_nest_cancel(skb, attr); 2524 nla_nest_cancel(skb, rxsc_nest); 2525 nla_nest_cancel(skb, rxsc_list); 2526 goto 
nla_put_failure; 2527 } 2528 nla_nest_end(skb, attr); 2529 2530 rxsa_list = nla_nest_start_noflag(skb, 2531 MACSEC_RXSC_ATTR_SA_LIST); 2532 if (!rxsa_list) { 2533 nla_nest_cancel(skb, rxsc_nest); 2534 nla_nest_cancel(skb, rxsc_list); 2535 goto nla_put_failure; 2536 } 2537 2538 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { 2539 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); 2540 struct nlattr *rxsa_nest; 2541 2542 if (!rx_sa) 2543 continue; 2544 2545 rxsa_nest = nla_nest_start_noflag(skb, k++); 2546 if (!rxsa_nest) { 2547 nla_nest_cancel(skb, rxsa_list); 2548 nla_nest_cancel(skb, rxsc_nest); 2549 nla_nest_cancel(skb, rxsc_list); 2550 goto nla_put_failure; 2551 } 2552 2553 attr = nla_nest_start_noflag(skb, 2554 MACSEC_SA_ATTR_STATS); 2555 if (!attr) { 2556 nla_nest_cancel(skb, rxsa_list); 2557 nla_nest_cancel(skb, rxsc_nest); 2558 nla_nest_cancel(skb, rxsc_list); 2559 goto nla_put_failure; 2560 } 2561 if (copy_rx_sa_stats(skb, rx_sa->stats)) { 2562 nla_nest_cancel(skb, attr); 2563 nla_nest_cancel(skb, rxsa_list); 2564 nla_nest_cancel(skb, rxsc_nest); 2565 nla_nest_cancel(skb, rxsc_list); 2566 goto nla_put_failure; 2567 } 2568 nla_nest_end(skb, attr); 2569 2570 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 2571 nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) || 2572 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 2573 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 2574 nla_nest_cancel(skb, rxsa_nest); 2575 nla_nest_cancel(skb, rxsc_nest); 2576 nla_nest_cancel(skb, rxsc_list); 2577 goto nla_put_failure; 2578 } 2579 nla_nest_end(skb, rxsa_nest); 2580 } 2581 2582 nla_nest_end(skb, rxsa_list); 2583 nla_nest_end(skb, rxsc_nest); 2584 } 2585 2586 nla_nest_end(skb, rxsc_list); 2587 2588 genlmsg_end(skb, hdr); 2589 2590 return 0; 2591 2592 nla_put_failure: 2593 genlmsg_cancel(skb, hdr); 2594 return -EMSGSIZE; 2595 } 2596 2597 static int macsec_generation = 1; /* protected by RTNL */ 2598 2599 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 2600 { 2601 struct net *net = sock_net(skb->sk); 2602 struct net_device *dev; 2603 int dev_idx, d; 2604 2605 dev_idx = cb->args[0]; 2606 2607 d = 0; 2608 rtnl_lock(); 2609 2610 cb->seq = macsec_generation; 2611 2612 for_each_netdev(net, dev) { 2613 struct macsec_secy *secy; 2614 2615 if (d < dev_idx) 2616 goto next; 2617 2618 if (!netif_is_macsec(dev)) 2619 goto next; 2620 2621 secy = &macsec_priv(dev)->secy; 2622 if (dump_secy(secy, dev, skb, cb) < 0) 2623 goto done; 2624 next: 2625 d++; 2626 } 2627 2628 done: 2629 rtnl_unlock(); 2630 cb->args[0] = d; 2631 return skb->len; 2632 } 2633 2634 static const struct genl_ops macsec_genl_ops[] = { 2635 { 2636 .cmd = MACSEC_CMD_GET_TXSC, 2637 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2638 .dumpit = macsec_dump_txsc, 2639 }, 2640 { 2641 .cmd = MACSEC_CMD_ADD_RXSC, 2642 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2643 .doit = macsec_add_rxsc, 2644 .flags = GENL_ADMIN_PERM, 2645 }, 2646 { 2647 .cmd = MACSEC_CMD_DEL_RXSC, 2648 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2649 .doit = macsec_del_rxsc, 2650 .flags = GENL_ADMIN_PERM, 2651 }, 2652 { 2653 .cmd = MACSEC_CMD_UPD_RXSC, 2654 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2655 .doit = macsec_upd_rxsc, 2656 .flags = GENL_ADMIN_PERM, 2657 }, 2658 { 2659 .cmd = MACSEC_CMD_ADD_TXSA, 2660 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2661 .doit = macsec_add_txsa, 2662 .flags = GENL_ADMIN_PERM, 2663 
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family macsec_fam __ro_after_init = {
	.name = MACSEC_GENL_NAME,
	.hdrsize = 0,
	.version = MACSEC_GENL_VERSION,
	.maxattr = MACSEC_ATTR_MAX,
	.policy = macsec_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.ops = macsec_genl_ops,
	.n_ops = ARRAY_SIZE(macsec_genl_ops),
};

static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	/* IEEE 802.1AE-2006 10.5: protectFrames is disabled, so hand the
	 * frame to the real device untagged and unencrypted
	 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	/* no usable transmit SA on the encoding association: drop */
	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
static struct lock_class_key macsec_netdev_addr_lock_key;

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}

static void
macsec_dev_uninit(struct net_device *dev) 2788 { 2789 struct macsec_dev *macsec = macsec_priv(dev); 2790 2791 gro_cells_destroy(&macsec->gro_cells); 2792 free_percpu(dev->tstats); 2793 } 2794 2795 static netdev_features_t macsec_fix_features(struct net_device *dev, 2796 netdev_features_t features) 2797 { 2798 struct macsec_dev *macsec = macsec_priv(dev); 2799 struct net_device *real_dev = macsec->real_dev; 2800 2801 features &= (real_dev->features & MACSEC_FEATURES) | 2802 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; 2803 features |= NETIF_F_LLTX; 2804 2805 return features; 2806 } 2807 2808 static int macsec_dev_open(struct net_device *dev) 2809 { 2810 struct macsec_dev *macsec = macsec_priv(dev); 2811 struct net_device *real_dev = macsec->real_dev; 2812 int err; 2813 2814 err = dev_uc_add(real_dev, dev->dev_addr); 2815 if (err < 0) 2816 return err; 2817 2818 if (dev->flags & IFF_ALLMULTI) { 2819 err = dev_set_allmulti(real_dev, 1); 2820 if (err < 0) 2821 goto del_unicast; 2822 } 2823 2824 if (dev->flags & IFF_PROMISC) { 2825 err = dev_set_promiscuity(real_dev, 1); 2826 if (err < 0) 2827 goto clear_allmulti; 2828 } 2829 2830 if (netif_carrier_ok(real_dev)) 2831 netif_carrier_on(dev); 2832 2833 return 0; 2834 clear_allmulti: 2835 if (dev->flags & IFF_ALLMULTI) 2836 dev_set_allmulti(real_dev, -1); 2837 del_unicast: 2838 dev_uc_del(real_dev, dev->dev_addr); 2839 netif_carrier_off(dev); 2840 return err; 2841 } 2842 2843 static int macsec_dev_stop(struct net_device *dev) 2844 { 2845 struct macsec_dev *macsec = macsec_priv(dev); 2846 struct net_device *real_dev = macsec->real_dev; 2847 2848 netif_carrier_off(dev); 2849 2850 dev_mc_unsync(real_dev, dev); 2851 dev_uc_unsync(real_dev, dev); 2852 2853 if (dev->flags & IFF_ALLMULTI) 2854 dev_set_allmulti(real_dev, -1); 2855 2856 if (dev->flags & IFF_PROMISC) 2857 dev_set_promiscuity(real_dev, -1); 2858 2859 dev_uc_del(real_dev, dev->dev_addr); 2860 2861 return 0; 2862 } 2863 2864 static void macsec_dev_change_rx_flags(struct net_device *dev, int change) 2865 { 2866 struct net_device *real_dev = macsec_priv(dev)->real_dev; 2867 2868 if (!(dev->flags & IFF_UP)) 2869 return; 2870 2871 if (change & IFF_ALLMULTI) 2872 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); 2873 2874 if (change & IFF_PROMISC) 2875 dev_set_promiscuity(real_dev, 2876 dev->flags & IFF_PROMISC ? 
					    1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	/* worst-case SecTAG (SCI present) + ICV overhead must still fit
	 * within the MTU of the underlying device
	 */
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static int macsec_get_nest_level(struct net_device *dev)
{
	return macsec_priv(dev)->nest_level;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init = macsec_dev_init,
	.ndo_uninit = macsec_dev_uninit,
	.ndo_open = macsec_dev_open,
	.ndo_stop = macsec_dev_stop,
	.ndo_fix_features = macsec_fix_features,
	.ndo_change_mtu = macsec_change_mtu,
	.ndo_set_rx_mode = macsec_dev_set_rx_mode,
	.ndo_change_rx_flags = macsec_dev_change_rx_flags,
	.ndo_set_mac_address = macsec_set_mac_address,
	.ndo_start_xmit = macsec_start_xmit,
	.ndo_get_stats64 = macsec_get_stats64,
	.ndo_get_iflink = macsec_get_iflink,
	.ndo_get_lock_subclass = macsec_get_nest_level,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
2996 [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, 2997 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, 2998 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, 2999 }; 3000 3001 static void macsec_free_netdev(struct net_device *dev) 3002 { 3003 struct macsec_dev *macsec = macsec_priv(dev); 3004 struct net_device *real_dev = macsec->real_dev; 3005 3006 free_percpu(macsec->stats); 3007 free_percpu(macsec->secy.tx_sc.stats); 3008 3009 dev_put(real_dev); 3010 } 3011 3012 static void macsec_setup(struct net_device *dev) 3013 { 3014 ether_setup(dev); 3015 dev->min_mtu = 0; 3016 dev->max_mtu = ETH_MAX_MTU; 3017 dev->priv_flags |= IFF_NO_QUEUE; 3018 dev->netdev_ops = &macsec_netdev_ops; 3019 dev->needs_free_netdev = true; 3020 dev->priv_destructor = macsec_free_netdev; 3021 SET_NETDEV_DEVTYPE(dev, &macsec_type); 3022 3023 eth_zero_addr(dev->broadcast); 3024 } 3025 3026 static int macsec_changelink_common(struct net_device *dev, 3027 struct nlattr *data[]) 3028 { 3029 struct macsec_secy *secy; 3030 struct macsec_tx_sc *tx_sc; 3031 3032 secy = &macsec_priv(dev)->secy; 3033 tx_sc = &secy->tx_sc; 3034 3035 if (data[IFLA_MACSEC_ENCODING_SA]) { 3036 struct macsec_tx_sa *tx_sa; 3037 3038 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); 3039 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); 3040 3041 secy->operational = tx_sa && tx_sa->active; 3042 } 3043 3044 if (data[IFLA_MACSEC_WINDOW]) 3045 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); 3046 3047 if (data[IFLA_MACSEC_ENCRYPT]) 3048 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); 3049 3050 if (data[IFLA_MACSEC_PROTECT]) 3051 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); 3052 3053 if (data[IFLA_MACSEC_INC_SCI]) 3054 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 3055 3056 if (data[IFLA_MACSEC_ES]) 3057 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); 3058 3059 if (data[IFLA_MACSEC_SCB]) 3060 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); 3061 3062 if (data[IFLA_MACSEC_REPLAY_PROTECT]) 3063 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); 3064 3065 if (data[IFLA_MACSEC_VALIDATION]) 3066 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); 3067 3068 if (data[IFLA_MACSEC_CIPHER_SUITE]) { 3069 switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) { 3070 case MACSEC_CIPHER_ID_GCM_AES_128: 3071 case MACSEC_DEFAULT_CIPHER_ID: 3072 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3073 break; 3074 case MACSEC_CIPHER_ID_GCM_AES_256: 3075 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3076 break; 3077 default: 3078 return -EINVAL; 3079 } 3080 } 3081 3082 return 0; 3083 } 3084 3085 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], 3086 struct nlattr *data[], 3087 struct netlink_ext_ack *extack) 3088 { 3089 if (!data) 3090 return 0; 3091 3092 if (data[IFLA_MACSEC_CIPHER_SUITE] || 3093 data[IFLA_MACSEC_ICV_LEN] || 3094 data[IFLA_MACSEC_SCI] || 3095 data[IFLA_MACSEC_PORT]) 3096 return -EINVAL; 3097 3098 return macsec_changelink_common(dev, data); 3099 } 3100 3101 static void macsec_del_dev(struct macsec_dev *macsec) 3102 { 3103 int i; 3104 3105 while (macsec->secy.rx_sc) { 3106 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); 3107 3108 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); 3109 free_rx_sc(rx_sc); 3110 } 3111 3112 for (i = 0; i < MACSEC_NUM_AN; i++) { 3113 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); 3114 3115 if (sa) { 3116 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); 3117 
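			/* the SA is no longer reachable through
			 * tx_sc->sa[], so release this reference
			 */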
clear_tx_sa(sa); 3118 } 3119 } 3120 } 3121 3122 static void macsec_common_dellink(struct net_device *dev, struct list_head *head) 3123 { 3124 struct macsec_dev *macsec = macsec_priv(dev); 3125 struct net_device *real_dev = macsec->real_dev; 3126 3127 unregister_netdevice_queue(dev, head); 3128 list_del_rcu(&macsec->secys); 3129 macsec_del_dev(macsec); 3130 netdev_upper_dev_unlink(real_dev, dev); 3131 3132 macsec_generation++; 3133 } 3134 3135 static void macsec_dellink(struct net_device *dev, struct list_head *head) 3136 { 3137 struct macsec_dev *macsec = macsec_priv(dev); 3138 struct net_device *real_dev = macsec->real_dev; 3139 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3140 3141 macsec_common_dellink(dev, head); 3142 3143 if (list_empty(&rxd->secys)) { 3144 netdev_rx_handler_unregister(real_dev); 3145 kfree(rxd); 3146 } 3147 } 3148 3149 static int register_macsec_dev(struct net_device *real_dev, 3150 struct net_device *dev) 3151 { 3152 struct macsec_dev *macsec = macsec_priv(dev); 3153 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3154 3155 if (!rxd) { 3156 int err; 3157 3158 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); 3159 if (!rxd) 3160 return -ENOMEM; 3161 3162 INIT_LIST_HEAD(&rxd->secys); 3163 3164 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 3165 rxd); 3166 if (err < 0) { 3167 kfree(rxd); 3168 return err; 3169 } 3170 } 3171 3172 list_add_tail_rcu(&macsec->secys, &rxd->secys); 3173 return 0; 3174 } 3175 3176 static bool sci_exists(struct net_device *dev, sci_t sci) 3177 { 3178 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); 3179 struct macsec_dev *macsec; 3180 3181 list_for_each_entry(macsec, &rxd->secys, secys) { 3182 if (macsec->secy.sci == sci) 3183 return true; 3184 } 3185 3186 return false; 3187 } 3188 3189 static sci_t dev_to_sci(struct net_device *dev, __be16 port) 3190 { 3191 return make_sci(dev->dev_addr, port); 3192 } 3193 3194 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) 3195 { 3196 struct macsec_dev *macsec = macsec_priv(dev); 3197 struct macsec_secy *secy = &macsec->secy; 3198 3199 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); 3200 if (!macsec->stats) 3201 return -ENOMEM; 3202 3203 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); 3204 if (!secy->tx_sc.stats) { 3205 free_percpu(macsec->stats); 3206 return -ENOMEM; 3207 } 3208 3209 if (sci == MACSEC_UNDEF_SCI) 3210 sci = dev_to_sci(dev, MACSEC_PORT_ES); 3211 3212 secy->netdev = dev; 3213 secy->operational = true; 3214 secy->key_len = DEFAULT_SAK_LEN; 3215 secy->icv_len = icv_len; 3216 secy->validate_frames = MACSEC_VALIDATE_DEFAULT; 3217 secy->protect_frames = true; 3218 secy->replay_protect = false; 3219 3220 secy->sci = sci; 3221 secy->tx_sc.active = true; 3222 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; 3223 secy->tx_sc.encrypt = DEFAULT_ENCRYPT; 3224 secy->tx_sc.send_sci = DEFAULT_SEND_SCI; 3225 secy->tx_sc.end_station = false; 3226 secy->tx_sc.scb = false; 3227 3228 return 0; 3229 } 3230 3231 static int macsec_newlink(struct net *net, struct net_device *dev, 3232 struct nlattr *tb[], struct nlattr *data[], 3233 struct netlink_ext_ack *extack) 3234 { 3235 struct macsec_dev *macsec = macsec_priv(dev); 3236 struct net_device *real_dev; 3237 int err; 3238 sci_t sci; 3239 u8 icv_len = DEFAULT_ICV_LEN; 3240 rx_handler_func_t *rx_handler; 3241 3242 if (!tb[IFLA_LINK]) 3243 return -EINVAL; 3244 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); 3245 if (!real_dev) 3246 return -ENODEV; 3247 3248 
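	/* The setup order below matters: the device is registered first so
	 * that ->ndo_init() has run and its MAC address is settled, then it
	 * is linked on top of real_dev, and only then is the SecY created
	 * (its SCI may be derived from the MAC address) and the rx_handler
	 * hooked up.  The error labels at the bottom unwind in reverse.
	 */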
dev->priv_flags |= IFF_MACSEC; 3249 3250 macsec->real_dev = real_dev; 3251 3252 if (data && data[IFLA_MACSEC_ICV_LEN]) 3253 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3254 dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true); 3255 3256 rx_handler = rtnl_dereference(real_dev->rx_handler); 3257 if (rx_handler && rx_handler != macsec_handle_frame) 3258 return -EBUSY; 3259 3260 err = register_netdevice(dev); 3261 if (err < 0) 3262 return err; 3263 3264 dev_hold(real_dev); 3265 3266 macsec->nest_level = dev_get_nest_level(real_dev) + 1; 3267 netdev_lockdep_set_classes(dev); 3268 lockdep_set_class_and_subclass(&dev->addr_list_lock, 3269 &macsec_netdev_addr_lock_key, 3270 macsec_get_nest_level(dev)); 3271 3272 err = netdev_upper_dev_link(real_dev, dev, extack); 3273 if (err < 0) 3274 goto unregister; 3275 3276 /* need to be already registered so that ->init has run and 3277 * the MAC addr is set 3278 */ 3279 if (data && data[IFLA_MACSEC_SCI]) 3280 sci = nla_get_sci(data[IFLA_MACSEC_SCI]); 3281 else if (data && data[IFLA_MACSEC_PORT]) 3282 sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); 3283 else 3284 sci = dev_to_sci(dev, MACSEC_PORT_ES); 3285 3286 if (rx_handler && sci_exists(real_dev, sci)) { 3287 err = -EBUSY; 3288 goto unlink; 3289 } 3290 3291 err = macsec_add_dev(dev, sci, icv_len); 3292 if (err) 3293 goto unlink; 3294 3295 if (data) { 3296 err = macsec_changelink_common(dev, data); 3297 if (err) 3298 goto del_dev; 3299 } 3300 3301 err = register_macsec_dev(real_dev, dev); 3302 if (err < 0) 3303 goto del_dev; 3304 3305 netif_stacked_transfer_operstate(real_dev, dev); 3306 linkwatch_fire_event(dev); 3307 3308 macsec_generation++; 3309 3310 return 0; 3311 3312 del_dev: 3313 macsec_del_dev(macsec); 3314 unlink: 3315 netdev_upper_dev_unlink(real_dev, dev); 3316 unregister: 3317 unregister_netdevice(dev); 3318 return err; 3319 } 3320 3321 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[], 3322 struct netlink_ext_ack *extack) 3323 { 3324 u64 csid = MACSEC_DEFAULT_CIPHER_ID; 3325 u8 icv_len = DEFAULT_ICV_LEN; 3326 int flag; 3327 bool es, scb, sci; 3328 3329 if (!data) 3330 return 0; 3331 3332 if (data[IFLA_MACSEC_CIPHER_SUITE]) 3333 csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); 3334 3335 if (data[IFLA_MACSEC_ICV_LEN]) { 3336 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3337 if (icv_len != DEFAULT_ICV_LEN) { 3338 char dummy_key[DEFAULT_SAK_LEN] = { 0 }; 3339 struct crypto_aead *dummy_tfm; 3340 3341 dummy_tfm = macsec_alloc_tfm(dummy_key, 3342 DEFAULT_SAK_LEN, 3343 icv_len); 3344 if (IS_ERR(dummy_tfm)) 3345 return PTR_ERR(dummy_tfm); 3346 crypto_free_aead(dummy_tfm); 3347 } 3348 } 3349 3350 switch (csid) { 3351 case MACSEC_CIPHER_ID_GCM_AES_128: 3352 case MACSEC_CIPHER_ID_GCM_AES_256: 3353 case MACSEC_DEFAULT_CIPHER_ID: 3354 if (icv_len < MACSEC_MIN_ICV_LEN || 3355 icv_len > MACSEC_STD_ICV_LEN) 3356 return -EINVAL; 3357 break; 3358 default: 3359 return -EINVAL; 3360 } 3361 3362 if (data[IFLA_MACSEC_ENCODING_SA]) { 3363 if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN) 3364 return -EINVAL; 3365 } 3366 3367 for (flag = IFLA_MACSEC_ENCODING_SA + 1; 3368 flag < IFLA_MACSEC_VALIDATION; 3369 flag++) { 3370 if (data[flag]) { 3371 if (nla_get_u8(data[flag]) > 1) 3372 return -EINVAL; 3373 } 3374 } 3375 3376 es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false; 3377 sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false; 3378 scb = data[IFLA_MACSEC_SCB] ? 
nla_get_u8(data[IFLA_MACSEC_SCB]) : false; 3379 3380 if ((sci && (scb || es)) || (scb && es)) 3381 return -EINVAL; 3382 3383 if (data[IFLA_MACSEC_VALIDATION] && 3384 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) 3385 return -EINVAL; 3386 3387 if ((data[IFLA_MACSEC_REPLAY_PROTECT] && 3388 nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) && 3389 !data[IFLA_MACSEC_WINDOW]) 3390 return -EINVAL; 3391 3392 return 0; 3393 } 3394 3395 static struct net *macsec_get_link_net(const struct net_device *dev) 3396 { 3397 return dev_net(macsec_priv(dev)->real_dev); 3398 } 3399 3400 static size_t macsec_get_size(const struct net_device *dev) 3401 { 3402 return nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */ 3403 nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */ 3404 nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */ 3405 nla_total_size(4) + /* IFLA_MACSEC_WINDOW */ 3406 nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */ 3407 nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */ 3408 nla_total_size(1) + /* IFLA_MACSEC_PROTECT */ 3409 nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */ 3410 nla_total_size(1) + /* IFLA_MACSEC_ES */ 3411 nla_total_size(1) + /* IFLA_MACSEC_SCB */ 3412 nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */ 3413 nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */ 3414 0; 3415 } 3416 3417 static int macsec_fill_info(struct sk_buff *skb, 3418 const struct net_device *dev) 3419 { 3420 struct macsec_secy *secy = &macsec_priv(dev)->secy; 3421 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 3422 u64 csid; 3423 3424 switch (secy->key_len) { 3425 case MACSEC_GCM_AES_128_SAK_LEN: 3426 csid = MACSEC_DEFAULT_CIPHER_ID; 3427 break; 3428 case MACSEC_GCM_AES_256_SAK_LEN: 3429 csid = MACSEC_CIPHER_ID_GCM_AES_256; 3430 break; 3431 default: 3432 goto nla_put_failure; 3433 } 3434 3435 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci, 3436 IFLA_MACSEC_PAD) || 3437 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || 3438 nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE, 3439 csid, IFLA_MACSEC_PAD) || 3440 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || 3441 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || 3442 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || 3443 nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) || 3444 nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) || 3445 nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) || 3446 nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) || 3447 nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) || 3448 0) 3449 goto nla_put_failure; 3450 3451 if (secy->replay_protect) { 3452 if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window)) 3453 goto nla_put_failure; 3454 } 3455 3456 return 0; 3457 3458 nla_put_failure: 3459 return -EMSGSIZE; 3460 } 3461 3462 static struct rtnl_link_ops macsec_link_ops __read_mostly = { 3463 .kind = "macsec", 3464 .priv_size = sizeof(struct macsec_dev), 3465 .maxtype = IFLA_MACSEC_MAX, 3466 .policy = macsec_rtnl_policy, 3467 .setup = macsec_setup, 3468 .validate = macsec_validate_attr, 3469 .newlink = macsec_newlink, 3470 .changelink = macsec_changelink, 3471 .dellink = macsec_dellink, 3472 .get_size = macsec_get_size, 3473 .fill_info = macsec_fill_info, 3474 .get_link_net = macsec_get_link_net, 3475 }; 3476 3477 static bool is_macsec_master(struct net_device *dev) 3478 { 3479 return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame; 3480 } 3481 3482 static int macsec_notify(struct notifier_block *this, unsigned long event, 3483 void *ptr) 3484 { 3485 struct 
net_device *real_dev = netdev_notifier_info_to_dev(ptr); 3486 LIST_HEAD(head); 3487 3488 if (!is_macsec_master(real_dev)) 3489 return NOTIFY_DONE; 3490 3491 switch (event) { 3492 case NETDEV_DOWN: 3493 case NETDEV_UP: 3494 case NETDEV_CHANGE: { 3495 struct macsec_dev *m, *n; 3496 struct macsec_rxh_data *rxd; 3497 3498 rxd = macsec_data_rtnl(real_dev); 3499 list_for_each_entry_safe(m, n, &rxd->secys, secys) { 3500 struct net_device *dev = m->secy.netdev; 3501 3502 netif_stacked_transfer_operstate(real_dev, dev); 3503 } 3504 break; 3505 } 3506 case NETDEV_UNREGISTER: { 3507 struct macsec_dev *m, *n; 3508 struct macsec_rxh_data *rxd; 3509 3510 rxd = macsec_data_rtnl(real_dev); 3511 list_for_each_entry_safe(m, n, &rxd->secys, secys) { 3512 macsec_common_dellink(m->secy.netdev, &head); 3513 } 3514 3515 netdev_rx_handler_unregister(real_dev); 3516 kfree(rxd); 3517 3518 unregister_netdevice_many(&head); 3519 break; 3520 } 3521 case NETDEV_CHANGEMTU: { 3522 struct macsec_dev *m; 3523 struct macsec_rxh_data *rxd; 3524 3525 rxd = macsec_data_rtnl(real_dev); 3526 list_for_each_entry(m, &rxd->secys, secys) { 3527 struct net_device *dev = m->secy.netdev; 3528 unsigned int mtu = real_dev->mtu - (m->secy.icv_len + 3529 macsec_extra_len(true)); 3530 3531 if (dev->mtu > mtu) 3532 dev_set_mtu(dev, mtu); 3533 } 3534 } 3535 } 3536 3537 return NOTIFY_OK; 3538 } 3539 3540 static struct notifier_block macsec_notifier = { 3541 .notifier_call = macsec_notify, 3542 }; 3543 3544 static int __init macsec_init(void) 3545 { 3546 int err; 3547 3548 pr_info("MACsec IEEE 802.1AE\n"); 3549 err = register_netdevice_notifier(&macsec_notifier); 3550 if (err) 3551 return err; 3552 3553 err = rtnl_link_register(&macsec_link_ops); 3554 if (err) 3555 goto notifier; 3556 3557 err = genl_register_family(&macsec_fam); 3558 if (err) 3559 goto rtnl; 3560 3561 return 0; 3562 3563 rtnl: 3564 rtnl_link_unregister(&macsec_link_ops); 3565 notifier: 3566 unregister_netdevice_notifier(&macsec_notifier); 3567 return err; 3568 } 3569 3570 static void __exit macsec_exit(void) 3571 { 3572 genl_unregister_family(&macsec_fam); 3573 rtnl_link_unregister(&macsec_link_ops); 3574 unregister_netdevice_notifier(&macsec_notifier); 3575 rcu_barrier(); 3576 } 3577 3578 module_init(macsec_init); 3579 module_exit(macsec_exit); 3580 3581 MODULE_ALIAS_RTNL_LINK("macsec"); 3582 MODULE_ALIAS_GENL_FAMILY("macsec"); 3583 3584 MODULE_DESCRIPTION("MACsec IEEE 802.1AE"); 3585 MODULE_LICENSE("GPL v2"); 3586
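
/* Example configuration from userspace via iproute2 (a minimal sketch;
 * the interface names, peer MAC address and 16-byte hex keys are
 * illustrative placeholders):
 *
 *	ip link add link eth0 macsec0 type macsec encrypt on
 *	ip macsec add macsec0 tx sa 0 pn 1 on key 01 <32-hex-digit-key>
 *	ip macsec add macsec0 rx port 1 address <peer-mac>
 *	ip macsec add macsec0 rx port 1 address <peer-mac> sa 0 pn 1 on \
 *		key 02 <32-hex-digit-key>
 *	ip link set macsec0 up
 *
 * "ip link add ... type macsec" ends up in macsec_newlink() above, while
 * the "ip macsec" subcommands drive the MACSEC_CMD_* operations in
 * macsec_genl_ops over generic netlink.
 */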