// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @dev_tracker: refcount tracker for @real_dev reference
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	netdevice_tracker dev_tracker;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
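/* Illustration (example values, not from the original source): with the
 * usual defaults (SCI present, integrity only, AN 0) and e.g. PN 0x2a and
 * a SecY SCI built from MAC aa:bb:cc:dd:ee:ff plus port 1, the SecTAG that
 * macsec_fill_sectag() below emits right after the MAC addresses would be:
 *
 *   88 e5                     EtherType ETH_P_MACSEC
 *   20                        tci_an: SC set, E/C clear, AN = 0
 *   00                        short_length = 0 (secure data >= 48 octets)
 *   00 00 00 2a               packet_number
 *   aa bb cc dd ee ff 00 01   secure_channel_id (MAC || port)
 */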
static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
{
	struct macsec_rx_sa *sa = NULL;
	int an;

	for (an = 0; an < MACSEC_NUM_AN; an++) {
		sa = macsec_rxsa_get(rx_sc->sa[an]);
		if (sa)
			break;
	}
	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))

static sci_t make_sci(const u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}
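/* Illustration: with MACSEC_TAG_LEN = 6 and MACSEC_SCI_LEN = 8,
 * macsec_sectag_len(true) = 14. The helpers below then give
 * macsec_hdr_len(true) = 14 + ETH_HLEN = 28 (everything in front of the
 * secure data) and macsec_extra_len(true) = 14 + 2 = 16 (the per-frame
 * overhead beyond a plain Ethernet header, i.e. the SecTAG plus the
 * MACsec EtherType).
 */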
static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}
/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}
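/* Illustration: macsec_msdu_len() recovers the MSDU size (everything after
 * the MAC addresses in the original frame) from a protected skb. For
 * example, an original 1014-byte frame (12 address bytes + 1002-byte MSDU)
 * protected with the SCI present and a 16-byte ICV has
 * skb->len = 1014 + 16 + 16 = 1046, and the helper returns
 * 1046 - macsec_hdr_len(true) - icv_len = 1046 - 28 - 16 = 1002.
 * Callers accounting tx_bytes add 2 * ETH_ALEN back to recover the
 * original frame length.
 */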
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->tx_packets);
		u64_stats_add(&stats->tx_bytes, len);
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
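/* Note: macsec_alloc_req() packs three objects into a single GFP_ATOMIC
 * allocation to keep the hot paths cheap:
 *
 *   [ aead_request + crypto ctx | 12-byte GCM IV | scatterlist[num_frags] ]
 *
 * Only the scatterlist offset needs explicit alignment. A single
 * aead_request_free() on the returned pointer releases the IV and the
 * scatterlist along with the request.
 */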
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = macsec_send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	macsec_skb_cb(skb)->has_sci = sci_present;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
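/* Note: crypto_aead_encrypt() above can finish two ways. On a synchronous
 * return of 0, macsec_encrypt() hands the protected frame back to the
 * caller; on -EINPROGRESS the crypto layer completes the request later and
 * macsec_encrypt_done() does the accounting and dev_queue_xmit() from the
 * callback. Since macsec_alloc_tfm() below deliberately picks a
 * synchronous gcm(aes), the -EINPROGRESS path is defensive. The
 * dev_hold()/dev_put() pair keeps the device alive for whichever path
 * runs.
 */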
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		secy->netdev->stats.rx_dropped++;
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		unsigned int msdu_len = macsec_msdu_len(skb);
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
		else
			rxsc_stats->stats.InOctetsValidated += msdu_len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			this_cpu_inc(rx_sa->stats->InPktsNotValid);
			secy->netdev->stats.rx_errors++;
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		/* Instead of "pn >=" - to support pn overflow in xpn */
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->rx_packets);
	u64_stats_add(&stats->rx_bytes, len);
	u64_stats_update_end(&stats->syncp);
}
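/* Worked example: XPN recovery in macsec_decrypt() below rebuilds a 64-bit
 * PN from the 32 bits carried in the SecTAG. If rx_sa->next_pn_halves is
 * { .upper = 5, .lower = 0xfffffff0 } and the header carries
 * hdr_pn = 0x00000004, then hdr_pn is both smaller than the expected lower
 * half and in the other half of the 32-bit space, so the upper half is
 * bumped and the recovered PN becomes 0x0000000600000004.
 */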
static void macsec_decrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, macsec->secy.netdev);

	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct metadata_dst *md_dst;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);
	md_dst = skb_metadata_dst(skb);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			struct macsec_rx_sc *rx_sc = NULL;

			if (md_dst && md_dst->type == METADATA_MACSEC)
				rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);

			if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
				continue;

			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				if (ether_addr_equal_64bits(hdr->h_dest,
							    ndev->broadcast))
					nskb->pkt_type = PACKET_BROADCAST;
				else
					nskb->pkt_type = PACKET_MULTICAST;

				__netif_rx(nskb);
			} else if (rx_sc || ndev->flags & IFF_PROMISC) {
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			}

			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			macsec->secy.netdev->stats.rx_dropped++;
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}
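/* Note: the rx_handler return values used here follow the usual netdev
 * contract. RX_HANDLER_PASS leaves the skb on the underlying device (the
 * uncontrolled port), RX_HANDLER_ANOTHER reruns RX processing after
 * skb->dev has been pointed at a MACsec device, and RX_HANDLER_CONSUMED
 * means this handler took ownership (delivered or freed the skb) and the
 * caller must not touch it again.
 */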
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		secy->netdev->stats.rx_errors++;
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);

		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			secy->netdev->stats.rx_errors++;
			if (active_rx_sa)
				this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		if (active_rx_sa)
			this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			macsec->secy.netdev->stats.rx_dropped++;
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			macsec->secy.netdev->stats.rx_errors++;
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = __netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
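/* Note: in the crypto_alloc_aead() call below, the (type, mask) pair
 * (0, CRYPTO_ALG_ASYNC) asks the crypto API for an implementation whose
 * CRYPTO_ALG_ASYNC bit is clear, i.e. a synchronous gcm(aes). That way
 * crypto_aead_encrypt()/crypto_aead_decrypt() complete inline and frames
 * are not reordered by asynchronous completion.
 */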
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}
static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
					 bool active)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = active;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}
static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u32)value);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}
static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	ret = (*func)(ctx);

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}
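/* Illustration (roughly, from the iproute2 front end): the nested netlink
 * layout parsed above maps onto commands like
 *
 *   ip macsec add macsec0 rx sci 0011223344550001 sa 0 pn 1 on \
 *       key 01 81818181818181818181818181818181
 *
 * which arrives as MACSEC_ATTR_IFINDEX plus a MACSEC_ATTR_RXSC_CONFIG nest
 * (carrying MACSEC_RXSC_ATTR_SCI) and a MACSEC_ATTR_SA_CONFIG nest
 * (carrying MACSEC_SA_ATTR_AN/PN/ACTIVE/KEYID/KEY), consumed by
 * macsec_add_rxsa() below.
 */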
static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	macsec_rxsa_put(rx_sa);
	rtnl_unlock();
	return err;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool active = true;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rx_sc = create_rx_sc(dev, sci, active);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	del_rx_sc(secy, sci);
	free_rx_sc(rx_sc);
	rtnl_unlock();
	return ret;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
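/* Note: unlike validate_add_rxsa() (where MACSEC_SA_ATTR_PN is optional and
 * init_rx_sa() defaults next_pn to 1), validate_add_txsa() above makes the
 * initial PN mandatory and non-zero: the TX side must never reuse a
 * (key, PN) pair, since PN feeds the GCM nonce.
 */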
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	macsec_txsa_put(tx_sa);
	rtnl_unlock();
	return err;
}
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;
		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID] ||
	    attrs[MACSEC_SA_ATTR_SSCI] ||
	    attrs[MACSEC_SA_ATTR_SALT])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

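/* Update an existing TX SA. Note that validate_upd_sa() above rejects
 * any attempt to change the key, key id, SSCI or salt: key material is
 * immutable once installed, only the packet number and the active flag
 * can be updated. If propagating the change to an offload provider
 * fails, the previous PN and active/operational state are restored.
 */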
static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational, was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		spin_lock_bh(&tx_sa->lock);
		prev_pn = tx_sa->next_pn_halves;
		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	was_active = tx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&tx_sa->lock);
	}
	tx_sa->active = was_active;
	secy->operational = was_operational;
	rtnl_unlock();
	return ret;
}

static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		spin_lock_bh(&rx_sa->lock);
		prev_pn = rx_sa->next_pn_halves;
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	was_active = rx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();
	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&rx_sa->lock);
	}
	rx_sa->active = was_active;
	rtnl_unlock();
	return ret;
}

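/* Update a receive secure channel. Toggling the active flag also
 * adjusts secy->n_rx_sc, the count of active RX SCs consulted on the
 * receive fast path; both are rolled back if the offload provider
 * rejects the change.
 */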
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	unsigned int prev_n_rx_sc;
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	prev_n_rx_sc = secy->n_rx_sc;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	secy->n_rx_sc = prev_n_rx_sc;
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

static bool macsec_is_configured(struct macsec_dev *macsec)
{
	struct macsec_secy *secy = &macsec->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	int i;

	if (secy->rx_sc)
		return true;

	for (i = 0; i < MACSEC_NUM_AN; i++)
		if (tx_sc->sa[i])
			return true;

	return false;
}

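/* Switch the offloading mode of an existing device. Migration of
 * already-configured SAs/SCs is not supported, so the device must be
 * down and unconfigured; the SecY is then deleted from or added to the
 * offload provider as appropriate. With iproute2 this corresponds to,
 * e.g. (illustrative):
 *
 *	ip link set macsec0 type macsec offload mac
 */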
static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
{
	enum macsec_offload prev_offload;
	const struct macsec_ops *ops;
	struct macsec_context ctx;
	struct macsec_dev *macsec;
	int ret = 0;

	macsec = macsec_priv(dev);

	/* Check if the offloading mode is supported by the underlying layers */
	if (offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(offload, macsec))
		return -EOPNOTSUPP;

	/* Check if the net device is busy. */
	if (netif_running(dev))
		return -EBUSY;

	/* Check if the device already has rules configured: we do not support
	 * rules migration.
	 */
	if (macsec_is_configured(macsec))
		return -EBUSY;

	prev_offload = macsec->offload;

	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
			       macsec, &ctx);
	if (!ops)
		return -EOPNOTSUPP;

	macsec->offload = offload;

	ctx.secy = &macsec->secy;
	ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
					    : macsec_offload(ops->mdo_add_secy, &ctx);
	if (ret)
		macsec->offload = prev_offload;

	return ret;
}

static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
	struct nlattr **attrs = info->attrs;
	enum macsec_offload offload;
	struct macsec_dev *macsec;
	struct net_device *dev;
	int ret = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (!attrs[MACSEC_ATTR_OFFLOAD])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
					attrs[MACSEC_ATTR_OFFLOAD],
					macsec_genl_offload_policy, NULL))
		return -EINVAL;

	rtnl_lock();

	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	macsec = macsec_priv(dev);

	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
		ret = -EINVAL;
		goto out;
	}

	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);

	if (macsec->offload != offload)
		ret = macsec_update_offload(dev, offload);
out:
	rtnl_unlock();
	return ret;
}

static void get_tx_sa_stats(struct net_device *dev, int an,
			    struct macsec_tx_sa *tx_sa,
			    struct macsec_tx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.tx_sa = tx_sa;
			ctx.stats.tx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats =
			per_cpu_ptr(tx_sa->stats, cpu);

		sum->OutPktsProtected += stats->OutPktsProtected;
		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
	}
}

static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
{
	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
			sum->OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			sum->OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

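/* The get_*_stats() helpers below either query the offload provider or
 * fold the driver's per-CPU counters into a single sum. Per-SA counters
 * are plain u32s, so they are read directly; per-SC and per-SecY
 * counters are 64-bit and go through the u64_stats_fetch_begin()/
 * u64_stats_fetch_retry() sequence so that readers on 32-bit systems
 * see consistent values.
 */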
static void get_rx_sa_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc, int an,
			    struct macsec_rx_sa *rx_sa,
			    struct macsec_rx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.rx_sa = rx_sa;
			ctx.stats.rx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			ctx.rx_sc = rx_sc;
			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats =
			per_cpu_ptr(rx_sa->stats, cpu);

		sum->InPktsOK += stats->InPktsOK;
		sum->InPktsInvalid += stats->InPktsInvalid;
		sum->InPktsNotValid += stats->InPktsNotValid;
		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum->InPktsUnusedSA += stats->InPktsUnusedSA;
	}
}

static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats *sum)
{
	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
			sum->InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
			sum->InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			sum->InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
			sum->InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}

static void get_rx_sc_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc,
			    struct macsec_rx_sc_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.rx_sc_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			ctx.rx_sc = rx_sc;
			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(rx_sc->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		sum->InOctetsValidated += tmp.InOctetsValidated;
		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum->InPktsUnchecked += tmp.InPktsUnchecked;
		sum->InPktsDelayed += tmp.InPktsDelayed;
		sum->InPktsOK += tmp.InPktsOK;
		sum->InPktsInvalid += tmp.InPktsInvalid;
		sum->InPktsLate += tmp.InPktsLate;
		sum->InPktsNotValid += tmp.InPktsNotValid;
		sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
	}
}

static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum->InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum->InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum->InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum->InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum->InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum->InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum->InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum->InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum->InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum->InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static void get_tx_sc_stats(struct net_device *dev,
			    struct macsec_tx_sc_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.tx_sc_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		sum->OutPktsProtected += tmp.OutPktsProtected;
		sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum->OutOctetsProtected += tmp.OutOctetsProtected;
		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}
}

static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum->OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum->OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum->OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum->OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.dev_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_dev_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		sum->OutPktsUntagged += tmp.OutPktsUntagged;
		sum->InPktsUntagged += tmp.InPktsUntagged;
		sum->OutPktsTooLong += tmp.OutPktsTooLong;
		sum->InPktsNoTag += tmp.InPktsNoTag;
		sum->InPktsBadTag += tmp.InPktsBadTag;
		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum->InPktsNoSCI += tmp.InPktsNoSCI;
		sum->InPktsOverrun += tmp.InPktsOverrun;
	}
}

static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum->OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum->InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum->OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum->InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum->InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum->InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum->InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum->InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
							 MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}

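/* Dump one SecY as a nested netlink message: interface index and
 * offload mode, the SecY configuration, TX SC and SecY statistics,
 * then the list of TX SAs and the list of RX SCs, each RX SC carrying
 * its own statistics and RX SA list. Consistency across multi-part
 * dumps is checked against macsec_generation via
 * genl_dump_check_consistent().
 */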
static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
	  struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_tx_sc_stats tx_sc_stats = {0, };
	struct macsec_tx_sa_stats tx_sa_stats = {0, };
	struct macsec_rx_sc_stats rx_sc_stats = {0, };
	struct macsec_rx_sa_stats rx_sa_stats = {0, };
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_dev_stats dev_stats = {0, };
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *attr;
	void *hdr;
	int i, j;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
	if (!attr)
		goto nla_put_failure;
	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
		goto nla_put_failure;
	nla_nest_end(skb, attr);

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;

	get_tx_sc_stats(dev, &tx_sc_stats);
	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	get_secy_stats(dev, &dev_stats);
	if (copy_secy_stats(skb, &dev_stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;
		u64 pn;
		int pn_len;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start_noflag(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		if (secy->xpn) {
			pn = tx_sa->next_pn;
			pn_len = MACSEC_XPN_PN_LEN;
		} else {
			pn = tx_sa->next_pn_halves.lower;
			pn_len = MACSEC_DEFAULT_PN_LEN;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start_noflag(skb,
						  MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;
			u64 pn;
			int pn_len;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start_noflag(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start_noflag(skb,
						     MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (secy->xpn) {
				pn = rx_sa->next_pn;
				pn_len = MACSEC_XPN_PN_LEN;
			} else {
				pn = rx_sa->next_pn_halves.lower;
				pn_len = MACSEC_DEFAULT_PN_LEN;
			}

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}

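/* Generic netlink operations: one dump command to read the whole
 * configuration, and doit handlers (all requiring CAP_NET_ADMIN via
 * GENL_ADMIN_PERM) to add, update and delete RX SCs and TX/RX SAs and
 * to change the offload mode. These are what the iproute2 "ip macsec"
 * front end drives, e.g. (illustrative):
 *
 *	ip macsec add macsec0 tx sa 0 pn 1 on key 01 <16-byte hex key>
 *	ip macsec add macsec0 rx sci <sci> on
 */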
static const struct genl_small_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = macsec_dump_txsc,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_OFFLOAD,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_offload,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family macsec_fam __ro_after_init = {
	.name		= MACSEC_GENL_NAME,
	.hdrsize	= 0,
	.version	= MACSEC_GENL_VERSION,
	.maxattr	= MACSEC_ATTR_MAX,
	.policy		= macsec_genl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= macsec_genl_ops,
	.n_small_ops	= ARRAY_SIZE(macsec_genl_ops),
	.resv_start_op	= MACSEC_CMD_UPD_OFFLOAD + 1,
};

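/* Transmit path. When the SecY is offloaded, the skb is tagged with
 * the MACsec metadata destination and handed straight to the real
 * device. In software mode, frames bypass encryption when
 * protect_frames is off, are dropped while the SecY is not
 * operational, and otherwise go through macsec_encrypt(), which may
 * complete asynchronously via the AEAD API (-EINPROGRESS).
 */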
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	if (macsec_is_offloaded(netdev_priv(dev))) {
		struct metadata_dst *md_dst = secy->tx_sc.md_dst;

		skb_dst_drop(skb);
		dst_hold(&md_dst->dst);
		skb_dst_set(skb, &md_dst->dst);
		skb->dev = macsec->real_dev;
		return dev_queue_xmit(skb);
	}

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;
	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	/* Get macsec's reference to real_dev */
	netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}

static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto clear_allmulti;
		}

		ctx.secy = &macsec->secy;
		err = macsec_offload(ops->mdo_dev_open, &ctx);
		if (err)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_dev_stop, &ctx);
		}
	}

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	eth_hw_addr_set(dev, addr->sa_data);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_upd_secy, &ctx);
		}
	}

	return 0;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	if (!dev->tstats)
		return;

	dev_fetch_sw_netstats(s, dev->tstats);

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
	s->rx_errors = dev->stats.rx_errors;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
	[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	if (macsec->secy.tx_sc.md_dst)
		metadata_dst_free(macsec->secy.tx_sc.md_dst);
	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	/* Get rid of the macsec's reference to real_dev */
	netdev_put(macsec->real_dev, &macsec->dev_tracker);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

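/* Apply the IFLA_MACSEC_* attributes shared by newlink and changelink.
 * Selecting a cipher suite sets both the SAK length and whether
 * extended packet numbering (XPN) is in use; for XPN suites, IEEE
 * 802.1AEbw caps the replay window at 2^30 - 1
 * (MACSEC_XPN_MAX_REPLAY_WINDOW).
 */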
static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = true;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (data[IFLA_MACSEC_WINDOW]) {
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

		/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
		 * for XPN cipher suites
		 */
		if (secy->xpn &&
		    secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
			return -EINVAL;
	}

	return 0;
}

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	bool macsec_offload_state_change = false;
	enum macsec_offload offload;
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case the offload
	 * propagation fails, to revert macsec_changelink_common.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));

	ret = macsec_changelink_common(dev, data);
	if (ret)
		goto cleanup;

	if (data[IFLA_MACSEC_OFFLOAD]) {
		offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
		if (macsec->offload != offload) {
			macsec_offload_state_change = true;
			ret = macsec_update_offload(dev, offload);
			if (ret)
				goto cleanup;
		}
	}

	/* If h/w offloading is available, propagate to the device */
	if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats)
		return -ENOMEM;

	secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
	if (!secy->tx_sc.md_dst)
		/* macsec and secy percpu stats will be freed when unregistering
		 * net_device in macsec_free_netdev()
		 */
		return -ENOMEM;

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;
	secy->xpn = DEFAULT_XPN;

	secy->sci = sci;
	secy->tx_sc.md_dst->u.macsec_info.sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static struct lock_class_key macsec_netdev_addr_lock_key;

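/* Create a new macsec device on top of a real Ethernet device. The
 * netdevice must be registered before the SCI can be derived, because
 * ->ndo_init() is what sets the MAC address; on any later failure the
 * rollback path unwinds in reverse order (del_dev, upper-dev unlink,
 * unregister).
 */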
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	rx_handler_func_t *rx_handler;
	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
	struct net_device *real_dev;
	int err, mtu;
	sci_t sci;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_ETHER)
		return -EINVAL;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_OFFLOAD])
		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
	else
		/* MACsec offloading is off by default */
		macsec->offload = MACSEC_OFFLOAD_OFF;

	/* Check if the offloading mode is supported by the underlying layers */
	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(macsec->offload, macsec))
		return -EOPNOTSUPP;

	/* send_sci must be set to true when transmit sci explicitly is set */
	if ((data && data[IFLA_MACSEC_SCI]) &&
	    (data && data[IFLA_MACSEC_INC_SCI])) {
		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

		if (!send_sci)
			return -EINVAL;
	}

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
	if (mtu < 0)
		dev->mtu = 0;
	else
		dev->mtu = mtu;

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	netdev_lockdep_set_classes(dev);
	lockdep_set_class(&dev->addr_list_lock,
			  &macsec_netdev_addr_lock_key);

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			err = macsec_offload(ops->mdo_add_secy, &ctx);
			if (err)
				goto del_dev;
		}
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}

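/* Validate link attributes before a device is created. A non-default
 * ICV length is probed by allocating a throwaway AEAD transform, the
 * end-station, SCB and explicit-SCI flags are mutually exclusive per
 * the SecTAG TCI encoding rules, and enabling replay protection
 * requires an explicit window.
 */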
	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
		0;
}
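
/* Note: macsec_get_size() above must stay in sync with the nla_put_*()
 * calls in macsec_fill_info() below; if the reserved size is too small,
 * filling the message fails with -EMSGSIZE.
 */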
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_tx_sc *tx_sc;
	struct macsec_dev *macsec;
	struct macsec_secy *secy;
	u64 csid;

	macsec = macsec_priv(dev);
	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
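
/* Module bring-up order: the netdevice notifier is registered first so that
 * macsec_notify() is in place before any macsec link can exist, then the
 * rtnetlink link ops and the generic netlink family. The error path below
 * and macsec_exit() unwind in the reverse order.
 */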
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");
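
/* Illustrative usage (not part of the original source): a MACsec device on
 * top of eth0 can be created and configured from userspace with iproute2,
 * exercising the rtnetlink and genetlink interfaces above, e.g.:
 *
 *   ip link add link eth0 macsec0 type macsec port 11 encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1024 on key 01 <16-byte hex key>
 *   ip macsec add macsec0 rx port 1234 address c6:19:52:8f:e6:a0
 *   ip macsec add macsec0 rx port 1234 address c6:19:52:8f:e6:a0 \
 *           sa 0 pn 1 on key 00 <16-byte hex key>
 *
 * The device names, ports, address and key IDs above are placeholder
 * examples; see ip-macsec(8) for the full syntax.
 */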