// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};
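/* Illustrative sketch (not part of the driver): how the two structures
 * above map onto the 12-byte GCM nonce. For the non-XPN case the IV is
 * simply SCI || PN:
 *
 *	u8 iv[GCM_AES_IV_LEN];
 *	struct gcm_iv *giv = (struct gcm_iv *)iv;
 *
 *	giv->sci = secy->sci;       // bytes 0-7: secure channel id
 *	giv->pn  = htonl(next_pn);  // bytes 8-11: 32-bit packet number
 *
 * With XPN (IEEE 802.1AEbw), the 4-byte SSCI and the full 64-bit PN are
 * each XORed with the per-SA salt before use, which is what
 * macsec_fill_iv_xpn() below implements.
 */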
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @dev_tracker: refcount tracker for @real_dev reference
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 * @insert_tx_tag: when offloading, the device requires an additional
 *	tag to be inserted
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	netdevice_tracker dev_tracker;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
	bool insert_tx_tag;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))

static sci_t make_sci(const u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}
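/* Example with hypothetical values: for an end station with MAC address
 * 52:54:00:12:34:56, make_sci(addr, MACSEC_PORT_ES) above yields the
 * 8-byte SCI 52:54:00:12:34:56:00:01 -- the station address followed by
 * the port identifier in network byte order (MACSEC_PORT_ES being the
 * reserved End Station port number 1).
 */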
static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}
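/* Summary of the C/E encodings written by macsec_fill_sectag() above
 * (as implemented here, with GCM-AES): E=0/C=0 means integrity only
 * with the default ICV length, E=0/C=1 flags a non-default ICV length,
 * and E=1/C=1 (MACSEC_TCI_CONFID) means confidentiality. E=1/C=0 is a
 * reserved encoding and is rejected on receive in macsec_handle_frame()
 * below.
 */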
static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}
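/* Worked example for macsec_validate_skb() (hypothetical frame): with
 * the SC bit set and a 16-octet ICV, extra_len = 6 (SecTAG) + 8 (SCI) +
 * 2 (EtherType) + 16 (ICV) = 32 octets. A tagged frame carrying a
 * 40-octet MSDU is "short" (40 < MIN_NON_SHORT_LEN), so its SL field
 * must read 40 and skb->len - 2 * ETH_ALEN must equal exactly
 * 32 + 40 octets for the frame to pass.
 */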
#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
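/* A note on the PN bookkeeping above: pn_t keeps the counter as a
 * 64-bit value whose 32-bit halves are addressable separately. In XPN
 * mode the full 64-bit PN increments; in classic mode only the lower
 * half does, and since the upper half then stays zero, the full 64-bit
 * value reads zero exactly when the 32-bit PN wraps -- the exhaustion
 * point that __macsec_pn_wrapped() handles by taking the SA out of
 * service.
 */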
static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) -
	       secy->icv_len;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
		dev_sw_netstats_tx_add(dev, 1, len);
}

static void macsec_encrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
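/* macsec_alloc_req() above packs everything the crypto layer needs into
 * one GFP_ATOMIC allocation. Sketch of the resulting layout:
 *
 *	+-------------------------------+  <- tmp
 *	| struct aead_request + reqsize |
 *	+-------------------------------+  <- tmp + iv_offset
 *	| 12-byte GCM IV                |
 *	+-------------------------------+  <- tmp + sg_offset (aligned)
 *	| num_frags scatterlist entries |
 *	+-------------------------------+
 *
 * A single aead_request_free() on the returned request releases all
 * three regions at once.
 */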
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = macsec_send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	macsec_skb_cb(skb)->has_sci = sci_present;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
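/* How macsec_encrypt() above splits the frame for GCM (a summary, not
 * extra behaviour): in confidentiality mode the MAC addresses plus
 * SecTAG (macsec_hdr_len() octets) are associated data and the
 * remaining payload is encrypted in place; in integrity-only mode
 * everything up to the ICV is associated data and cryptlen is 0, so
 * GCM only produces the icv_len-byte tag in the space reserved by
 * skb_put() earlier in the function.
 */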
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_dropped);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		unsigned int msdu_len = macsec_msdu_len(skb);

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
		else
			rxsc_stats->stats.InOctetsValidated += msdu_len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			this_cpu_inc(rx_sa->stats->InPktsNotValid);
			DEV_STATS_INC(secy->netdev, rx_errors);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		// Instead of "pn >=" - to support pn overflow in xpn
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}
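/* Replay-check example with hypothetical numbers: with replay_window ==
 * 32 and next_pn == 1000, lowest_pn is 968; an arriving PN of 950 is
 * counted as InPktsLate and dropped, while 970 is still accepted and,
 * if it validates, leaves next_pn unchanged (970 + 1 < 1000). The extra
 * pn_same_half() test keeps a wrapped XPN lower half from being misread
 * as late.
 */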
static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	dev_sw_netstats_rx_add(dev, len);
}

static void macsec_decrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, macsec->secy.netdev);

	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}
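/* XPN PN-recovery example with hypothetical numbers: if the receive SA
 * expects next_pn 0x00000001_fffffff0 and the SecTAG carries only the
 * lower half 0x00000005, that lower half is behind the expected one and
 * in the other half of the 32-bit space, so macsec_decrypt() above
 * reconstructs the full PN as 0x00000002_00000005 by carrying into the
 * upper half before building the IV.
 */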
static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct metadata_dst *md_dst;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	bool is_macsec_md_dst;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);
	md_dst = skb_metadata_dst(skb);
	is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			const struct macsec_ops *ops;

			ops = macsec_get_ops(macsec, NULL);

			if (ops->rx_uses_md_dst && !is_macsec_md_dst)
				continue;

			if (is_macsec_md_dst) {
				struct macsec_rx_sc *rx_sc;

				/* All drivers that implement MACsec offload
				 * support using skb metadata destinations must
				 * indicate that they do so.
				 */
				DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
				rx_sc = find_rx_sc(&macsec->secy,
						   md_dst->u.macsec_info.sci);
				if (!rx_sc)
					continue;
				/* device indicated macsec offload occurred */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				eth_skb_pkt_type(skb, ndev);
				ret = RX_HANDLER_ANOTHER;
				goto out;
			}

			/* This datapath is insecure because it is unable to
			 * enforce isolation of broadcast/multicast traffic and
			 * unicast traffic with promiscuous mode on the macsec
			 * netdev. Since the core stack has no mechanism to
			 * check that the hardware did indeed receive MACsec
			 * traffic, it is possible that the response handling
			 * done by the MACsec port was to a plaintext packet.
			 * This violates the MACsec protocol standard.
			 */
			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				eth_skb_pkt_type(nskb, ndev);

				__netif_rx(nskb);
			} else if (ndev->flags & IFF_PROMISC) {
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			}

			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}
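/* macsec_handle_frame() below is the receive entry point of the SecY.
 * Return-value convention (from the rx_handler API): RX_HANDLER_PASS
 * hands the frame back to the normal stack on the real device (the
 * uncontrolled port), RX_HANDLER_ANOTHER re-runs the receive path with
 * skb->dev retargeted at a MACsec netdev, and RX_HANDLER_CONSUMED means
 * the frame was queued, dropped, or is in flight through the crypto
 * layer.
 */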
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_errors);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(secy->netdev, rx_errors);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		DEV_STATS_INC(macsec->secy.netdev, rx_dropped);

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_errors);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = __netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
					 bool active)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = active;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
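/* Note on create_rx_sc() above: the new SC is fully initialised before
 * rcu_assign_pointer() links it at the head of secy->rx_sc, so readers
 * walking the list under for_each_rxsc() either miss it entirely or see
 * a complete structure -- the usual RCU publish pattern.
 */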
static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u64)value);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}
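/* The genl attribute tables below are what userspace (typically
 * iproute2) fills in. As a usage sketch -- the exact CLI syntax may
 * vary by iproute2 version:
 *
 *	ip macsec add macsec0 rx sci 5254001234560001
 *	ip macsec add macsec0 rx sci 5254001234560001 sa 0 pn 1 on \
 *		key 01 81818181818181818181818181818181
 *
 * which maps to MACSEC_ATTR_RXSC_CONFIG (the SCI) and
 * MACSEC_ATTR_SA_CONFIG (AN, PN, active flag, key id, and a 16-byte
 * GCM-AES-128 key).
 */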
static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	ret = (*func)(ctx);

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	macsec_rxsa_put(rx_sa);
	rtnl_unlock();
	return err;
}
static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool active = true;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rx_sc = create_rx_sc(dev, sci, active);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	del_rx_sc(secy, sci);
	free_rx_sc(rx_sc);
	rtnl_unlock();
	return ret;
}
static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	macsec_txsa_put(tx_sa);
	rtnl_unlock();
	return err;
}
*tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2122 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2123 int ret; 2124 2125 if (!attrs[MACSEC_ATTR_IFINDEX]) 2126 return -EINVAL; 2127 2128 if (parse_sa_config(attrs, tb_sa)) 2129 return -EINVAL; 2130 2131 if (parse_rxsc_config(attrs, tb_rxsc)) 2132 return -EINVAL; 2133 2134 rtnl_lock(); 2135 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2136 &dev, &secy, &rx_sc, &assoc_num); 2137 if (IS_ERR(rx_sa)) { 2138 rtnl_unlock(); 2139 return PTR_ERR(rx_sa); 2140 } 2141 2142 if (rx_sa->active) { 2143 rtnl_unlock(); 2144 return -EBUSY; 2145 } 2146 2147 /* If h/w offloading is available, propagate to the device */ 2148 if (macsec_is_offloaded(netdev_priv(dev))) { 2149 const struct macsec_ops *ops; 2150 struct macsec_context ctx; 2151 2152 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2153 if (!ops) { 2154 ret = -EOPNOTSUPP; 2155 goto cleanup; 2156 } 2157 2158 ctx.sa.assoc_num = assoc_num; 2159 ctx.sa.rx_sa = rx_sa; 2160 ctx.secy = secy; 2161 2162 ret = macsec_offload(ops->mdo_del_rxsa, &ctx); 2163 if (ret) 2164 goto cleanup; 2165 } 2166 2167 RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); 2168 clear_rx_sa(rx_sa); 2169 2170 rtnl_unlock(); 2171 2172 return 0; 2173 2174 cleanup: 2175 rtnl_unlock(); 2176 return ret; 2177 } 2178 2179 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) 2180 { 2181 struct nlattr **attrs = info->attrs; 2182 struct net_device *dev; 2183 struct macsec_secy *secy; 2184 struct macsec_rx_sc *rx_sc; 2185 sci_t sci; 2186 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2187 int ret; 2188 2189 if (!attrs[MACSEC_ATTR_IFINDEX]) 2190 return -EINVAL; 2191 2192 if (parse_rxsc_config(attrs, tb_rxsc)) 2193 return -EINVAL; 2194 2195 if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) 2196 return -EINVAL; 2197 2198 rtnl_lock(); 2199 dev = get_dev_from_nl(genl_info_net(info), info->attrs); 2200 if (IS_ERR(dev)) { 2201 rtnl_unlock(); 2202 return PTR_ERR(dev); 2203 } 2204 2205 secy = &macsec_priv(dev)->secy; 2206 sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); 2207 2208 rx_sc = del_rx_sc(secy, sci); 2209 if (!rx_sc) { 2210 rtnl_unlock(); 2211 return -ENODEV; 2212 } 2213 2214 /* If h/w offloading is available, propagate to the device */ 2215 if (macsec_is_offloaded(netdev_priv(dev))) { 2216 const struct macsec_ops *ops; 2217 struct macsec_context ctx; 2218 2219 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2220 if (!ops) { 2221 ret = -EOPNOTSUPP; 2222 goto cleanup; 2223 } 2224 2225 ctx.rx_sc = rx_sc; 2226 ctx.secy = secy; 2227 ret = macsec_offload(ops->mdo_del_rxsc, &ctx); 2228 if (ret) 2229 goto cleanup; 2230 } 2231 2232 free_rx_sc(rx_sc); 2233 rtnl_unlock(); 2234 2235 return 0; 2236 2237 cleanup: 2238 rtnl_unlock(); 2239 return ret; 2240 } 2241 2242 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) 2243 { 2244 struct nlattr **attrs = info->attrs; 2245 struct net_device *dev; 2246 struct macsec_secy *secy; 2247 struct macsec_tx_sc *tx_sc; 2248 struct macsec_tx_sa *tx_sa; 2249 u8 assoc_num; 2250 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2251 int ret; 2252 2253 if (!attrs[MACSEC_ATTR_IFINDEX]) 2254 return -EINVAL; 2255 2256 if (parse_sa_config(attrs, tb_sa)) 2257 return -EINVAL; 2258 2259 rtnl_lock(); 2260 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2261 &dev, &secy, &tx_sc, &assoc_num); 2262 if (IS_ERR(tx_sa)) { 2263 rtnl_unlock(); 2264 return PTR_ERR(tx_sa); 2265 } 2266 2267 if (tx_sa->active) { 2268 rtnl_unlock(); 2269 return -EBUSY; 2270 } 2271 2272 /* If h/w offloading is available, 
propagate to the device */ 2273 if (macsec_is_offloaded(netdev_priv(dev))) { 2274 const struct macsec_ops *ops; 2275 struct macsec_context ctx; 2276 2277 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2278 if (!ops) { 2279 ret = -EOPNOTSUPP; 2280 goto cleanup; 2281 } 2282 2283 ctx.sa.assoc_num = assoc_num; 2284 ctx.sa.tx_sa = tx_sa; 2285 ctx.secy = secy; 2286 2287 ret = macsec_offload(ops->mdo_del_txsa, &ctx); 2288 if (ret) 2289 goto cleanup; 2290 } 2291 2292 RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); 2293 clear_tx_sa(tx_sa); 2294 2295 rtnl_unlock(); 2296 2297 return 0; 2298 2299 cleanup: 2300 rtnl_unlock(); 2301 return ret; 2302 } 2303 2304 static bool validate_upd_sa(struct nlattr **attrs) 2305 { /* an update may change PN and active state, but never the key, key id, SSCI or salt of an existing SA */ 2306 if (!attrs[MACSEC_SA_ATTR_AN] || 2307 attrs[MACSEC_SA_ATTR_KEY] || 2308 attrs[MACSEC_SA_ATTR_KEYID] || 2309 attrs[MACSEC_SA_ATTR_SSCI] || 2310 attrs[MACSEC_SA_ATTR_SALT]) 2311 return false; 2312 2313 if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) 2314 return false; 2315 2316 if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0) 2317 return false; 2318 2319 if (attrs[MACSEC_SA_ATTR_ACTIVE]) { 2320 if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) 2321 return false; 2322 } 2323 2324 return true; 2325 } 2326 2327 static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) 2328 { 2329 struct nlattr **attrs = info->attrs; 2330 struct net_device *dev; 2331 struct macsec_secy *secy; 2332 struct macsec_tx_sc *tx_sc; 2333 struct macsec_tx_sa *tx_sa; 2334 u8 assoc_num; 2335 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2336 bool was_operational, was_active; 2337 pn_t prev_pn; 2338 int ret = 0; 2339 2340 prev_pn.full64 = 0; 2341 2342 if (!attrs[MACSEC_ATTR_IFINDEX]) 2343 return -EINVAL; 2344 2345 if (parse_sa_config(attrs, tb_sa)) 2346 return -EINVAL; 2347 2348 if (!validate_upd_sa(tb_sa)) 2349 return -EINVAL; 2350 2351 rtnl_lock(); 2352 tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, 2353 &dev, &secy, &tx_sc, &assoc_num); 2354 if (IS_ERR(tx_sa)) { 2355 rtnl_unlock(); 2356 return PTR_ERR(tx_sa); 2357 } 2358 2359 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2360 int pn_len; 2361 2362 pn_len = secy->xpn ?
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2363 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2364 pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n", 2365 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2366 rtnl_unlock(); 2367 return -EINVAL; 2368 } 2369 2370 spin_lock_bh(&tx_sa->lock); 2371 prev_pn = tx_sa->next_pn_halves; 2372 tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2373 spin_unlock_bh(&tx_sa->lock); 2374 } 2375 2376 was_active = tx_sa->active; 2377 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2378 tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2379 2380 was_operational = secy->operational; 2381 if (assoc_num == tx_sc->encoding_sa) 2382 secy->operational = tx_sa->active; 2383 2384 /* If h/w offloading is available, propagate to the device */ 2385 if (macsec_is_offloaded(netdev_priv(dev))) { 2386 const struct macsec_ops *ops; 2387 struct macsec_context ctx; 2388 2389 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2390 if (!ops) { 2391 ret = -EOPNOTSUPP; 2392 goto cleanup; 2393 } 2394 2395 ctx.sa.assoc_num = assoc_num; 2396 ctx.sa.tx_sa = tx_sa; 2397 ctx.sa.update_pn = !!prev_pn.full64; 2398 ctx.secy = secy; 2399 2400 ret = macsec_offload(ops->mdo_upd_txsa, &ctx); 2401 if (ret) 2402 goto cleanup; 2403 } 2404 2405 rtnl_unlock(); 2406 2407 return 0; 2408 2409 cleanup: 2410 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2411 spin_lock_bh(&tx_sa->lock); 2412 tx_sa->next_pn_halves = prev_pn; 2413 spin_unlock_bh(&tx_sa->lock); 2414 } 2415 tx_sa->active = was_active; 2416 secy->operational = was_operational; 2417 rtnl_unlock(); 2418 return ret; 2419 } 2420 2421 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) 2422 { 2423 struct nlattr **attrs = info->attrs; 2424 struct net_device *dev; 2425 struct macsec_secy *secy; 2426 struct macsec_rx_sc *rx_sc; 2427 struct macsec_rx_sa *rx_sa; 2428 u8 assoc_num; 2429 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2430 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; 2431 bool was_active; 2432 pn_t prev_pn; 2433 int ret = 0; 2434 2435 prev_pn.full64 = 0; 2436 2437 if (!attrs[MACSEC_ATTR_IFINDEX]) 2438 return -EINVAL; 2439 2440 if (parse_rxsc_config(attrs, tb_rxsc)) 2441 return -EINVAL; 2442 2443 if (parse_sa_config(attrs, tb_sa)) 2444 return -EINVAL; 2445 2446 if (!validate_upd_sa(tb_sa)) 2447 return -EINVAL; 2448 2449 rtnl_lock(); 2450 rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, 2451 &dev, &secy, &rx_sc, &assoc_num); 2452 if (IS_ERR(rx_sa)) { 2453 rtnl_unlock(); 2454 return PTR_ERR(rx_sa); 2455 } 2456 2457 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2458 int pn_len; 2459 2460 pn_len = secy->xpn ? 
MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN; 2461 if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) { 2462 pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n", 2463 nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len); 2464 rtnl_unlock(); 2465 return -EINVAL; 2466 } 2467 2468 spin_lock_bh(&rx_sa->lock); 2469 prev_pn = rx_sa->next_pn_halves; 2470 rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]); 2471 spin_unlock_bh(&rx_sa->lock); 2472 } 2473 2474 was_active = rx_sa->active; 2475 if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) 2476 rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); 2477 2478 /* If h/w offloading is available, propagate to the device */ 2479 if (macsec_is_offloaded(netdev_priv(dev))) { 2480 const struct macsec_ops *ops; 2481 struct macsec_context ctx; 2482 2483 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2484 if (!ops) { 2485 ret = -EOPNOTSUPP; 2486 goto cleanup; 2487 } 2488 2489 ctx.sa.assoc_num = assoc_num; 2490 ctx.sa.rx_sa = rx_sa; 2491 ctx.sa.update_pn = !!prev_pn.full64; 2492 ctx.secy = secy; 2493 2494 ret = macsec_offload(ops->mdo_upd_rxsa, &ctx); 2495 if (ret) 2496 goto cleanup; 2497 } 2498 2499 rtnl_unlock(); 2500 return 0; 2501 2502 cleanup: 2503 if (tb_sa[MACSEC_SA_ATTR_PN]) { 2504 spin_lock_bh(&rx_sa->lock); 2505 rx_sa->next_pn_halves = prev_pn; 2506 spin_unlock_bh(&rx_sa->lock); 2507 } 2508 rx_sa->active = was_active; 2509 rtnl_unlock(); 2510 return ret; 2511 } 2512 2513 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) 2514 { 2515 struct nlattr **attrs = info->attrs; 2516 struct net_device *dev; 2517 struct macsec_secy *secy; 2518 struct macsec_rx_sc *rx_sc; 2519 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; 2520 unsigned int prev_n_rx_sc; 2521 bool was_active; 2522 int ret; 2523 2524 if (!attrs[MACSEC_ATTR_IFINDEX]) 2525 return -EINVAL; 2526 2527 if (parse_rxsc_config(attrs, tb_rxsc)) 2528 return -EINVAL; 2529 2530 if (!validate_add_rxsc(tb_rxsc)) 2531 return -EINVAL; 2532 2533 rtnl_lock(); 2534 rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); 2535 if (IS_ERR(rx_sc)) { 2536 rtnl_unlock(); 2537 return PTR_ERR(rx_sc); 2538 } 2539 2540 was_active = rx_sc->active; 2541 prev_n_rx_sc = secy->n_rx_sc; 2542 if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { 2543 bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); 2544 2545 if (rx_sc->active != new) 2546 secy->n_rx_sc += new ? 
1 : -1; 2547 2548 rx_sc->active = new; 2549 } 2550 2551 /* If h/w offloading is available, propagate to the device */ 2552 if (macsec_is_offloaded(netdev_priv(dev))) { 2553 const struct macsec_ops *ops; 2554 struct macsec_context ctx; 2555 2556 ops = macsec_get_ops(netdev_priv(dev), &ctx); 2557 if (!ops) { 2558 ret = -EOPNOTSUPP; 2559 goto cleanup; 2560 } 2561 2562 ctx.rx_sc = rx_sc; 2563 ctx.secy = secy; 2564 2565 ret = macsec_offload(ops->mdo_upd_rxsc, &ctx); 2566 if (ret) 2567 goto cleanup; 2568 } 2569 2570 rtnl_unlock(); 2571 2572 return 0; 2573 2574 cleanup: 2575 secy->n_rx_sc = prev_n_rx_sc; 2576 rx_sc->active = was_active; 2577 rtnl_unlock(); 2578 return ret; 2579 } 2580 2581 static bool macsec_is_configured(struct macsec_dev *macsec) 2582 { 2583 struct macsec_secy *secy = &macsec->secy; 2584 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 2585 int i; 2586 2587 if (secy->rx_sc) 2588 return true; 2589 2590 for (i = 0; i < MACSEC_NUM_AN; i++) 2591 if (tx_sc->sa[i]) 2592 return true; 2593 2594 return false; 2595 } 2596 2597 static bool macsec_needs_tx_tag(struct macsec_dev *macsec, 2598 const struct macsec_ops *ops) 2599 { 2600 return macsec->offload == MACSEC_OFFLOAD_PHY && 2601 ops->mdo_insert_tx_tag; 2602 } 2603 2604 static void macsec_set_head_tail_room(struct net_device *dev) 2605 { 2606 struct macsec_dev *macsec = macsec_priv(dev); 2607 struct net_device *real_dev = macsec->real_dev; 2608 int needed_headroom, needed_tailroom; 2609 const struct macsec_ops *ops; 2610 2611 ops = macsec_get_ops(macsec, NULL); 2612 if (ops) { 2613 needed_headroom = ops->needed_headroom; 2614 needed_tailroom = ops->needed_tailroom; 2615 } else { 2616 needed_headroom = MACSEC_NEEDED_HEADROOM; 2617 needed_tailroom = MACSEC_NEEDED_TAILROOM; 2618 } 2619 2620 dev->needed_headroom = real_dev->needed_headroom + needed_headroom; 2621 dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom; 2622 } 2623 2624 static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload) 2625 { 2626 enum macsec_offload prev_offload; 2627 const struct macsec_ops *ops; 2628 struct macsec_context ctx; 2629 struct macsec_dev *macsec; 2630 int ret = 0; 2631 2632 macsec = macsec_priv(dev); 2633 2634 /* Check if the offloading mode is supported by the underlying layers */ 2635 if (offload != MACSEC_OFFLOAD_OFF && 2636 !macsec_check_offload(offload, macsec)) 2637 return -EOPNOTSUPP; 2638 2639 /* Check if the net device is busy. */ 2640 if (netif_running(dev)) 2641 return -EBUSY; 2642 2643 /* Check if the device already has rules configured: we do not support 2644 * rules migration. 2645 */ 2646 if (macsec_is_configured(macsec)) 2647 return -EBUSY; 2648 2649 prev_offload = macsec->offload; 2650 2651 ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload, 2652 macsec, &ctx); 2653 if (!ops) 2654 return -EOPNOTSUPP; 2655 2656 macsec->offload = offload; 2657 2658 ctx.secy = &macsec->secy; 2659 ret = offload == MACSEC_OFFLOAD_OFF ? 
macsec_offload(ops->mdo_del_secy, &ctx) 2660 : macsec_offload(ops->mdo_add_secy, &ctx); 2661 if (ret) { 2662 macsec->offload = prev_offload; 2663 return ret; 2664 } 2665 2666 macsec_set_head_tail_room(dev); 2667 macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops); 2668 2669 return ret; 2670 } 2671 2672 static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) 2673 { 2674 struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1]; 2675 struct nlattr **attrs = info->attrs; 2676 enum macsec_offload offload; 2677 struct macsec_dev *macsec; 2678 struct net_device *dev; 2679 int ret = 0; 2680 2681 if (!attrs[MACSEC_ATTR_IFINDEX]) 2682 return -EINVAL; 2683 2684 if (!attrs[MACSEC_ATTR_OFFLOAD]) 2685 return -EINVAL; 2686 2687 if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX, 2688 attrs[MACSEC_ATTR_OFFLOAD], 2689 macsec_genl_offload_policy, NULL)) 2690 return -EINVAL; 2691 2692 rtnl_lock(); 2693 2694 dev = get_dev_from_nl(genl_info_net(info), attrs); 2695 if (IS_ERR(dev)) { 2696 ret = PTR_ERR(dev); 2697 goto out; 2698 } 2699 macsec = macsec_priv(dev); 2700 2701 if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) { 2702 ret = -EINVAL; 2703 goto out; 2704 } 2705 2706 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); 2707 2708 if (macsec->offload != offload) 2709 ret = macsec_update_offload(dev, offload); 2710 out: 2711 rtnl_unlock(); 2712 return ret; 2713 } 2714 2715 static void get_tx_sa_stats(struct net_device *dev, int an, 2716 struct macsec_tx_sa *tx_sa, 2717 struct macsec_tx_sa_stats *sum) 2718 { 2719 struct macsec_dev *macsec = macsec_priv(dev); 2720 int cpu; 2721 2722 /* If h/w offloading is available, propagate to the device */ 2723 if (macsec_is_offloaded(macsec)) { 2724 const struct macsec_ops *ops; 2725 struct macsec_context ctx; 2726 2727 ops = macsec_get_ops(macsec, &ctx); 2728 if (ops) { 2729 ctx.sa.assoc_num = an; 2730 ctx.sa.tx_sa = tx_sa; 2731 ctx.stats.tx_sa_stats = sum; 2732 ctx.secy = &macsec_priv(dev)->secy; 2733 macsec_offload(ops->mdo_get_tx_sa_stats, &ctx); 2734 } 2735 return; 2736 } 2737 2738 for_each_possible_cpu(cpu) { 2739 const struct macsec_tx_sa_stats *stats = 2740 per_cpu_ptr(tx_sa->stats, cpu); 2741 2742 sum->OutPktsProtected += stats->OutPktsProtected; 2743 sum->OutPktsEncrypted += stats->OutPktsEncrypted; 2744 } 2745 } 2746 2747 static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum) 2748 { 2749 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, 2750 sum->OutPktsProtected) || 2751 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2752 sum->OutPktsEncrypted)) 2753 return -EMSGSIZE; 2754 2755 return 0; 2756 } 2757 2758 static void get_rx_sa_stats(struct net_device *dev, 2759 struct macsec_rx_sc *rx_sc, int an, 2760 struct macsec_rx_sa *rx_sa, 2761 struct macsec_rx_sa_stats *sum) 2762 { 2763 struct macsec_dev *macsec = macsec_priv(dev); 2764 int cpu; 2765 2766 /* If h/w offloading is available, propagate to the device */ 2767 if (macsec_is_offloaded(macsec)) { 2768 const struct macsec_ops *ops; 2769 struct macsec_context ctx; 2770 2771 ops = macsec_get_ops(macsec, &ctx); 2772 if (ops) { 2773 ctx.sa.assoc_num = an; 2774 ctx.sa.rx_sa = rx_sa; 2775 ctx.stats.rx_sa_stats = sum; 2776 ctx.secy = &macsec_priv(dev)->secy; 2777 ctx.rx_sc = rx_sc; 2778 macsec_offload(ops->mdo_get_rx_sa_stats, &ctx); 2779 } 2780 return; 2781 } 2782 2783 for_each_possible_cpu(cpu) { 2784 const struct macsec_rx_sa_stats *stats = 2785 per_cpu_ptr(rx_sa->stats, cpu); 2786 2787 sum->InPktsOK += stats->InPktsOK; 2788 
sum->InPktsInvalid += stats->InPktsInvalid; 2789 sum->InPktsNotValid += stats->InPktsNotValid; 2790 sum->InPktsNotUsingSA += stats->InPktsNotUsingSA; 2791 sum->InPktsUnusedSA += stats->InPktsUnusedSA; 2792 } 2793 } 2794 2795 static int copy_rx_sa_stats(struct sk_buff *skb, 2796 struct macsec_rx_sa_stats *sum) 2797 { 2798 if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) || 2799 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, 2800 sum->InPktsInvalid) || 2801 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, 2802 sum->InPktsNotValid) || 2803 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2804 sum->InPktsNotUsingSA) || 2805 nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, 2806 sum->InPktsUnusedSA)) 2807 return -EMSGSIZE; 2808 2809 return 0; 2810 } 2811 2812 static void get_rx_sc_stats(struct net_device *dev, 2813 struct macsec_rx_sc *rx_sc, 2814 struct macsec_rx_sc_stats *sum) 2815 { 2816 struct macsec_dev *macsec = macsec_priv(dev); 2817 int cpu; 2818 2819 /* If h/w offloading is available, propagate to the device */ 2820 if (macsec_is_offloaded(macsec)) { 2821 const struct macsec_ops *ops; 2822 struct macsec_context ctx; 2823 2824 ops = macsec_get_ops(macsec, &ctx); 2825 if (ops) { 2826 ctx.stats.rx_sc_stats = sum; 2827 ctx.secy = &macsec_priv(dev)->secy; 2828 ctx.rx_sc = rx_sc; 2829 macsec_offload(ops->mdo_get_rx_sc_stats, &ctx); 2830 } 2831 return; 2832 } 2833 2834 for_each_possible_cpu(cpu) { 2835 const struct pcpu_rx_sc_stats *stats; 2836 struct macsec_rx_sc_stats tmp; 2837 unsigned int start; 2838 2839 stats = per_cpu_ptr(rx_sc->stats, cpu); 2840 do { 2841 start = u64_stats_fetch_begin(&stats->syncp); 2842 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2843 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2844 2845 sum->InOctetsValidated += tmp.InOctetsValidated; 2846 sum->InOctetsDecrypted += tmp.InOctetsDecrypted; 2847 sum->InPktsUnchecked += tmp.InPktsUnchecked; 2848 sum->InPktsDelayed += tmp.InPktsDelayed; 2849 sum->InPktsOK += tmp.InPktsOK; 2850 sum->InPktsInvalid += tmp.InPktsInvalid; 2851 sum->InPktsLate += tmp.InPktsLate; 2852 sum->InPktsNotValid += tmp.InPktsNotValid; 2853 sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA; 2854 sum->InPktsUnusedSA += tmp.InPktsUnusedSA; 2855 } 2856 } 2857 2858 static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum) 2859 { 2860 if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, 2861 sum->InOctetsValidated, 2862 MACSEC_RXSC_STATS_ATTR_PAD) || 2863 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, 2864 sum->InOctetsDecrypted, 2865 MACSEC_RXSC_STATS_ATTR_PAD) || 2866 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, 2867 sum->InPktsUnchecked, 2868 MACSEC_RXSC_STATS_ATTR_PAD) || 2869 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, 2870 sum->InPktsDelayed, 2871 MACSEC_RXSC_STATS_ATTR_PAD) || 2872 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, 2873 sum->InPktsOK, 2874 MACSEC_RXSC_STATS_ATTR_PAD) || 2875 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, 2876 sum->InPktsInvalid, 2877 MACSEC_RXSC_STATS_ATTR_PAD) || 2878 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, 2879 sum->InPktsLate, 2880 MACSEC_RXSC_STATS_ATTR_PAD) || 2881 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, 2882 sum->InPktsNotValid, 2883 MACSEC_RXSC_STATS_ATTR_PAD) || 2884 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, 2885 sum->InPktsNotUsingSA, 2886 
MACSEC_RXSC_STATS_ATTR_PAD) || 2887 nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, 2888 sum->InPktsUnusedSA, 2889 MACSEC_RXSC_STATS_ATTR_PAD)) 2890 return -EMSGSIZE; 2891 2892 return 0; 2893 } 2894 2895 static void get_tx_sc_stats(struct net_device *dev, 2896 struct macsec_tx_sc_stats *sum) 2897 { 2898 struct macsec_dev *macsec = macsec_priv(dev); 2899 int cpu; 2900 2901 /* If h/w offloading is available, propagate to the device */ 2902 if (macsec_is_offloaded(macsec)) { 2903 const struct macsec_ops *ops; 2904 struct macsec_context ctx; 2905 2906 ops = macsec_get_ops(macsec, &ctx); 2907 if (ops) { 2908 ctx.stats.tx_sc_stats = sum; 2909 ctx.secy = &macsec_priv(dev)->secy; 2910 macsec_offload(ops->mdo_get_tx_sc_stats, &ctx); 2911 } 2912 return; 2913 } 2914 2915 for_each_possible_cpu(cpu) { 2916 const struct pcpu_tx_sc_stats *stats; 2917 struct macsec_tx_sc_stats tmp; 2918 unsigned int start; 2919 2920 stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu); 2921 do { 2922 start = u64_stats_fetch_begin(&stats->syncp); 2923 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2924 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2925 2926 sum->OutPktsProtected += tmp.OutPktsProtected; 2927 sum->OutPktsEncrypted += tmp.OutPktsEncrypted; 2928 sum->OutOctetsProtected += tmp.OutOctetsProtected; 2929 sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted; 2930 } 2931 } 2932 2933 static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum) 2934 { 2935 if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, 2936 sum->OutPktsProtected, 2937 MACSEC_TXSC_STATS_ATTR_PAD) || 2938 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, 2939 sum->OutPktsEncrypted, 2940 MACSEC_TXSC_STATS_ATTR_PAD) || 2941 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, 2942 sum->OutOctetsProtected, 2943 MACSEC_TXSC_STATS_ATTR_PAD) || 2944 nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, 2945 sum->OutOctetsEncrypted, 2946 MACSEC_TXSC_STATS_ATTR_PAD)) 2947 return -EMSGSIZE; 2948 2949 return 0; 2950 } 2951 2952 static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum) 2953 { 2954 struct macsec_dev *macsec = macsec_priv(dev); 2955 int cpu; 2956 2957 /* If h/w offloading is available, propagate to the device */ 2958 if (macsec_is_offloaded(macsec)) { 2959 const struct macsec_ops *ops; 2960 struct macsec_context ctx; 2961 2962 ops = macsec_get_ops(macsec, &ctx); 2963 if (ops) { 2964 ctx.stats.dev_stats = sum; 2965 ctx.secy = &macsec_priv(dev)->secy; 2966 macsec_offload(ops->mdo_get_dev_stats, &ctx); 2967 } 2968 return; 2969 } 2970 2971 for_each_possible_cpu(cpu) { 2972 const struct pcpu_secy_stats *stats; 2973 struct macsec_dev_stats tmp; 2974 unsigned int start; 2975 2976 stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu); 2977 do { 2978 start = u64_stats_fetch_begin(&stats->syncp); 2979 memcpy(&tmp, &stats->stats, sizeof(tmp)); 2980 } while (u64_stats_fetch_retry(&stats->syncp, start)); 2981 2982 sum->OutPktsUntagged += tmp.OutPktsUntagged; 2983 sum->InPktsUntagged += tmp.InPktsUntagged; 2984 sum->OutPktsTooLong += tmp.OutPktsTooLong; 2985 sum->InPktsNoTag += tmp.InPktsNoTag; 2986 sum->InPktsBadTag += tmp.InPktsBadTag; 2987 sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI; 2988 sum->InPktsNoSCI += tmp.InPktsNoSCI; 2989 sum->InPktsOverrun += tmp.InPktsOverrun; 2990 } 2991 } 2992 2993 static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum) 2994 { 2995 if (nla_put_u64_64bit(skb, 
MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, 2996 sum->OutPktsUntagged, 2997 MACSEC_SECY_STATS_ATTR_PAD) || 2998 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, 2999 sum->InPktsUntagged, 3000 MACSEC_SECY_STATS_ATTR_PAD) || 3001 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, 3002 sum->OutPktsTooLong, 3003 MACSEC_SECY_STATS_ATTR_PAD) || 3004 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, 3005 sum->InPktsNoTag, 3006 MACSEC_SECY_STATS_ATTR_PAD) || 3007 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, 3008 sum->InPktsBadTag, 3009 MACSEC_SECY_STATS_ATTR_PAD) || 3010 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, 3011 sum->InPktsUnknownSCI, 3012 MACSEC_SECY_STATS_ATTR_PAD) || 3013 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, 3014 sum->InPktsNoSCI, 3015 MACSEC_SECY_STATS_ATTR_PAD) || 3016 nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, 3017 sum->InPktsOverrun, 3018 MACSEC_SECY_STATS_ATTR_PAD)) 3019 return -EMSGSIZE; 3020 3021 return 0; 3022 } 3023 3024 static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) 3025 { 3026 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 3027 struct nlattr *secy_nest = nla_nest_start_noflag(skb, 3028 MACSEC_ATTR_SECY); 3029 u64 csid; 3030 3031 if (!secy_nest) 3032 return 1; 3033 3034 switch (secy->key_len) { 3035 case MACSEC_GCM_AES_128_SAK_LEN: 3036 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; 3037 break; 3038 case MACSEC_GCM_AES_256_SAK_LEN: 3039 csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256; 3040 break; 3041 default: 3042 goto cancel; 3043 } 3044 3045 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, 3046 MACSEC_SECY_ATTR_PAD) || 3047 nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 3048 csid, MACSEC_SECY_ATTR_PAD) || 3049 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 3050 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 3051 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 3052 nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || 3053 nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || 3054 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || 3055 nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || 3056 nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || 3057 nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || 3058 nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) 3059 goto cancel; 3060 3061 if (secy->replay_protect) { 3062 if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) 3063 goto cancel; 3064 } 3065 3066 nla_nest_end(skb, secy_nest); 3067 return 0; 3068 3069 cancel: 3070 nla_nest_cancel(skb, secy_nest); 3071 return 1; 3072 } 3073 3074 static noinline_for_stack int 3075 dump_secy(struct macsec_secy *secy, struct net_device *dev, 3076 struct sk_buff *skb, struct netlink_callback *cb) 3077 { 3078 struct macsec_tx_sc_stats tx_sc_stats = {0, }; 3079 struct macsec_tx_sa_stats tx_sa_stats = {0, }; 3080 struct macsec_rx_sc_stats rx_sc_stats = {0, }; 3081 struct macsec_rx_sa_stats rx_sa_stats = {0, }; 3082 struct macsec_dev *macsec = netdev_priv(dev); 3083 struct macsec_dev_stats dev_stats = {0, }; 3084 struct macsec_tx_sc *tx_sc = &secy->tx_sc; 3085 struct nlattr *txsa_list, *rxsc_list; 3086 struct macsec_rx_sc *rx_sc; 3087 struct nlattr *attr; 3088 void *hdr; 3089 int i, j; 3090 3091 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 
cb->nlh->nlmsg_seq, 3092 &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); 3093 if (!hdr) 3094 return -EMSGSIZE; 3095 3096 genl_dump_check_consistent(cb, hdr); 3097 3098 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 3099 goto nla_put_failure; 3100 3101 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD); 3102 if (!attr) 3103 goto nla_put_failure; 3104 if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload)) 3105 goto nla_put_failure; 3106 nla_nest_end(skb, attr); 3107 3108 if (nla_put_secy(secy, skb)) 3109 goto nla_put_failure; 3110 3111 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); 3112 if (!attr) 3113 goto nla_put_failure; 3114 3115 get_tx_sc_stats(dev, &tx_sc_stats); 3116 if (copy_tx_sc_stats(skb, &tx_sc_stats)) { 3117 nla_nest_cancel(skb, attr); 3118 goto nla_put_failure; 3119 } 3120 nla_nest_end(skb, attr); 3121 3122 attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); 3123 if (!attr) 3124 goto nla_put_failure; 3125 get_secy_stats(dev, &dev_stats); 3126 if (copy_secy_stats(skb, &dev_stats)) { 3127 nla_nest_cancel(skb, attr); 3128 goto nla_put_failure; 3129 } 3130 nla_nest_end(skb, attr); 3131 3132 txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST); 3133 if (!txsa_list) 3134 goto nla_put_failure; 3135 for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { 3136 struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); 3137 struct nlattr *txsa_nest; 3138 u64 pn; 3139 int pn_len; 3140 3141 if (!tx_sa) 3142 continue; 3143 3144 txsa_nest = nla_nest_start_noflag(skb, j++); 3145 if (!txsa_nest) { 3146 nla_nest_cancel(skb, txsa_list); 3147 goto nla_put_failure; 3148 } 3149 3150 attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS); 3151 if (!attr) { 3152 nla_nest_cancel(skb, txsa_nest); 3153 nla_nest_cancel(skb, txsa_list); 3154 goto nla_put_failure; 3155 } 3156 memset(&tx_sa_stats, 0, sizeof(tx_sa_stats)); 3157 get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats); 3158 if (copy_tx_sa_stats(skb, &tx_sa_stats)) { 3159 nla_nest_cancel(skb, attr); 3160 nla_nest_cancel(skb, txsa_nest); 3161 nla_nest_cancel(skb, txsa_list); 3162 goto nla_put_failure; 3163 } 3164 nla_nest_end(skb, attr); 3165 3166 if (secy->xpn) { 3167 pn = tx_sa->next_pn; 3168 pn_len = MACSEC_XPN_PN_LEN; 3169 } else { 3170 pn = tx_sa->next_pn_halves.lower; 3171 pn_len = MACSEC_DEFAULT_PN_LEN; 3172 } 3173 3174 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3175 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3176 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) || 3177 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) || 3178 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { 3179 nla_nest_cancel(skb, txsa_nest); 3180 nla_nest_cancel(skb, txsa_list); 3181 goto nla_put_failure; 3182 } 3183 3184 nla_nest_end(skb, txsa_nest); 3185 } 3186 nla_nest_end(skb, txsa_list); 3187 3188 rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST); 3189 if (!rxsc_list) 3190 goto nla_put_failure; 3191 3192 j = 1; 3193 for_each_rxsc_rtnl(secy, rx_sc) { 3194 int k; 3195 struct nlattr *rxsa_list; 3196 struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++); 3197 3198 if (!rxsc_nest) { 3199 nla_nest_cancel(skb, rxsc_list); 3200 goto nla_put_failure; 3201 } 3202 3203 if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || 3204 nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci, 3205 MACSEC_RXSC_ATTR_PAD)) { 3206 nla_nest_cancel(skb, rxsc_nest); 3207 nla_nest_cancel(skb, rxsc_list); 3208 goto nla_put_failure; 3209 } 3210 3211 attr = nla_nest_start_noflag(skb, 
MACSEC_RXSC_ATTR_STATS); 3212 if (!attr) { 3213 nla_nest_cancel(skb, rxsc_nest); 3214 nla_nest_cancel(skb, rxsc_list); 3215 goto nla_put_failure; 3216 } 3217 memset(&rx_sc_stats, 0, sizeof(rx_sc_stats)); 3218 get_rx_sc_stats(dev, rx_sc, &rx_sc_stats); 3219 if (copy_rx_sc_stats(skb, &rx_sc_stats)) { 3220 nla_nest_cancel(skb, attr); 3221 nla_nest_cancel(skb, rxsc_nest); 3222 nla_nest_cancel(skb, rxsc_list); 3223 goto nla_put_failure; 3224 } 3225 nla_nest_end(skb, attr); 3226 3227 rxsa_list = nla_nest_start_noflag(skb, 3228 MACSEC_RXSC_ATTR_SA_LIST); 3229 if (!rxsa_list) { 3230 nla_nest_cancel(skb, rxsc_nest); 3231 nla_nest_cancel(skb, rxsc_list); 3232 goto nla_put_failure; 3233 } 3234 3235 for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { 3236 struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); 3237 struct nlattr *rxsa_nest; 3238 u64 pn; 3239 int pn_len; 3240 3241 if (!rx_sa) 3242 continue; 3243 3244 rxsa_nest = nla_nest_start_noflag(skb, k++); 3245 if (!rxsa_nest) { 3246 nla_nest_cancel(skb, rxsa_list); 3247 nla_nest_cancel(skb, rxsc_nest); 3248 nla_nest_cancel(skb, rxsc_list); 3249 goto nla_put_failure; 3250 } 3251 3252 attr = nla_nest_start_noflag(skb, 3253 MACSEC_SA_ATTR_STATS); 3254 if (!attr) { 3255 nla_nest_cancel(skb, rxsa_list); 3256 nla_nest_cancel(skb, rxsc_nest); 3257 nla_nest_cancel(skb, rxsc_list); 3258 goto nla_put_failure; 3259 } 3260 memset(&rx_sa_stats, 0, sizeof(rx_sa_stats)); 3261 get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats); 3262 if (copy_rx_sa_stats(skb, &rx_sa_stats)) { 3263 nla_nest_cancel(skb, attr); 3264 nla_nest_cancel(skb, rxsa_list); 3265 nla_nest_cancel(skb, rxsc_nest); 3266 nla_nest_cancel(skb, rxsc_list); 3267 goto nla_put_failure; 3268 } 3269 nla_nest_end(skb, attr); 3270 3271 if (secy->xpn) { 3272 pn = rx_sa->next_pn; 3273 pn_len = MACSEC_XPN_PN_LEN; 3274 } else { 3275 pn = rx_sa->next_pn_halves.lower; 3276 pn_len = MACSEC_DEFAULT_PN_LEN; 3277 } 3278 3279 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || 3280 nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) || 3281 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) || 3282 (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) || 3283 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { 3284 nla_nest_cancel(skb, rxsa_nest); 3285 nla_nest_cancel(skb, rxsc_nest); 3286 nla_nest_cancel(skb, rxsc_list); 3287 goto nla_put_failure; 3288 } 3289 nla_nest_end(skb, rxsa_nest); 3290 } 3291 3292 nla_nest_end(skb, rxsa_list); 3293 nla_nest_end(skb, rxsc_nest); 3294 } 3295 3296 nla_nest_end(skb, rxsc_list); 3297 3298 genlmsg_end(skb, hdr); 3299 3300 return 0; 3301 3302 nla_put_failure: 3303 genlmsg_cancel(skb, hdr); 3304 return -EMSGSIZE; 3305 } 3306 3307 static int macsec_generation = 1; /* protected by RTNL */ 3308 3309 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 3310 { 3311 struct net *net = sock_net(skb->sk); 3312 struct net_device *dev; 3313 int dev_idx, d; 3314 3315 dev_idx = cb->args[0]; 3316 3317 d = 0; 3318 rtnl_lock(); 3319 3320 cb->seq = macsec_generation; 3321 3322 for_each_netdev(net, dev) { 3323 struct macsec_secy *secy; 3324 3325 if (d < dev_idx) 3326 goto next; 3327 3328 if (!netif_is_macsec(dev)) 3329 goto next; 3330 3331 secy = &macsec_priv(dev)->secy; 3332 if (dump_secy(secy, dev, skb, cb) < 0) 3333 goto done; 3334 next: 3335 d++; 3336 } 3337 3338 done: 3339 rtnl_unlock(); 3340 cb->args[0] = d; 3341 return skb->len; 3342 } 3343 3344 static const struct genl_small_ops macsec_genl_ops[] = { 3345 { 3346 .cmd = MACSEC_CMD_GET_TXSC, 3347 
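/* dump-only command: no .doit, and no GENL_ADMIN_PERM, so reading the configuration and counters needs no privilege */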
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3348 .dumpit = macsec_dump_txsc, 3349 }, 3350 { 3351 .cmd = MACSEC_CMD_ADD_RXSC, 3352 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3353 .doit = macsec_add_rxsc, 3354 .flags = GENL_ADMIN_PERM, 3355 }, 3356 { 3357 .cmd = MACSEC_CMD_DEL_RXSC, 3358 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3359 .doit = macsec_del_rxsc, 3360 .flags = GENL_ADMIN_PERM, 3361 }, 3362 { 3363 .cmd = MACSEC_CMD_UPD_RXSC, 3364 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3365 .doit = macsec_upd_rxsc, 3366 .flags = GENL_ADMIN_PERM, 3367 }, 3368 { 3369 .cmd = MACSEC_CMD_ADD_TXSA, 3370 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3371 .doit = macsec_add_txsa, 3372 .flags = GENL_ADMIN_PERM, 3373 }, 3374 { 3375 .cmd = MACSEC_CMD_DEL_TXSA, 3376 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3377 .doit = macsec_del_txsa, 3378 .flags = GENL_ADMIN_PERM, 3379 }, 3380 { 3381 .cmd = MACSEC_CMD_UPD_TXSA, 3382 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3383 .doit = macsec_upd_txsa, 3384 .flags = GENL_ADMIN_PERM, 3385 }, 3386 { 3387 .cmd = MACSEC_CMD_ADD_RXSA, 3388 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3389 .doit = macsec_add_rxsa, 3390 .flags = GENL_ADMIN_PERM, 3391 }, 3392 { 3393 .cmd = MACSEC_CMD_DEL_RXSA, 3394 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3395 .doit = macsec_del_rxsa, 3396 .flags = GENL_ADMIN_PERM, 3397 }, 3398 { 3399 .cmd = MACSEC_CMD_UPD_RXSA, 3400 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3401 .doit = macsec_upd_rxsa, 3402 .flags = GENL_ADMIN_PERM, 3403 }, 3404 { 3405 .cmd = MACSEC_CMD_UPD_OFFLOAD, 3406 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 3407 .doit = macsec_upd_offload, 3408 .flags = GENL_ADMIN_PERM, 3409 }, 3410 }; 3411 3412 static struct genl_family macsec_fam __ro_after_init = { 3413 .name = MACSEC_GENL_NAME, 3414 .hdrsize = 0, 3415 .version = MACSEC_GENL_VERSION, 3416 .maxattr = MACSEC_ATTR_MAX, 3417 .policy = macsec_genl_policy, 3418 .netnsok = true, 3419 .module = THIS_MODULE, 3420 .small_ops = macsec_genl_ops, 3421 .n_small_ops = ARRAY_SIZE(macsec_genl_ops), 3422 .resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1, 3423 }; 3424 3425 static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb, 3426 struct net_device *dev) 3427 { 3428 struct macsec_dev *macsec = macsec_priv(dev); 3429 const struct macsec_ops *ops; 3430 struct phy_device *phydev; 3431 struct macsec_context ctx; 3432 int skb_final_len; 3433 int err; 3434 3435 ops = macsec_get_ops(macsec, &ctx); 3436 skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom + 3437 ops->needed_tailroom; 3438 if (unlikely(skb_final_len > macsec->real_dev->mtu)) { 3439 err = -EINVAL; 3440 goto cleanup; 3441 } 3442 3443 phydev = macsec->real_dev->phydev; 3444 3445 err = skb_ensure_writable_head_tail(skb, dev); 3446 if (unlikely(err < 0)) 3447 goto cleanup; 3448 3449 err = ops->mdo_insert_tx_tag(phydev, skb); 3450 if (unlikely(err)) 3451 goto cleanup; 3452 3453 return skb; 3454 cleanup: 3455 kfree_skb(skb); 3456 return ERR_PTR(err); 3457 } 3458 3459 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, 3460 struct net_device *dev) 3461 { 3462 struct macsec_dev *macsec = netdev_priv(dev); 3463 struct macsec_secy *secy = &macsec->secy; 3464 struct pcpu_secy_stats *secy_stats; 3465 int ret, len; 3466 3467 if (macsec_is_offloaded(netdev_priv(dev))) { 3468 struct metadata_dst *md_dst = 
secy->tx_sc.md_dst; 3469 /* the metadata dst carries the SecY's SCI (set in macsec_add_dev()) so the offloading driver can steer the frame to the matching secure channel */ 3470 skb_dst_drop(skb); 3471 dst_hold(&md_dst->dst); 3472 skb_dst_set(skb, &md_dst->dst); 3473 3474 if (macsec->insert_tx_tag) { 3475 skb = macsec_insert_tx_tag(skb, dev); 3476 if (IS_ERR(skb)) { 3477 DEV_STATS_INC(dev, tx_dropped); 3478 return NETDEV_TX_OK; 3479 } 3480 } 3481 3482 skb->dev = macsec->real_dev; 3483 return dev_queue_xmit(skb); 3484 } 3485 3486 /* IEEE 802.1AE-2006 10.5 */ 3487 if (!secy->protect_frames) { 3488 secy_stats = this_cpu_ptr(macsec->stats); 3489 u64_stats_update_begin(&secy_stats->syncp); 3490 secy_stats->stats.OutPktsUntagged++; 3491 u64_stats_update_end(&secy_stats->syncp); 3492 skb->dev = macsec->real_dev; 3493 len = skb->len; 3494 ret = dev_queue_xmit(skb); 3495 count_tx(dev, ret, len); 3496 return ret; 3497 } 3498 3499 if (!secy->operational) { 3500 kfree_skb(skb); 3501 DEV_STATS_INC(dev, tx_dropped); 3502 return NETDEV_TX_OK; 3503 } 3504 3505 len = skb->len; 3506 skb = macsec_encrypt(skb, dev); 3507 if (IS_ERR(skb)) { 3508 if (PTR_ERR(skb) != -EINPROGRESS) 3509 DEV_STATS_INC(dev, tx_dropped); 3510 return NETDEV_TX_OK; 3511 } 3512 3513 macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); 3514 3515 macsec_encrypt_finish(skb, dev); 3516 ret = dev_queue_xmit(skb); 3517 count_tx(dev, ret, len); 3518 return ret; 3519 } 3520 3521 #define MACSEC_FEATURES \ 3522 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 3523 3524 static int macsec_dev_init(struct net_device *dev) 3525 { 3526 struct macsec_dev *macsec = macsec_priv(dev); 3527 struct net_device *real_dev = macsec->real_dev; 3528 int err; 3529 3530 err = gro_cells_init(&macsec->gro_cells, dev); 3531 if (err) 3532 return err; 3533 3534 dev->features = real_dev->features & MACSEC_FEATURES; 3535 dev->features |= NETIF_F_GSO_SOFTWARE; 3536 dev->lltx = true; 3537 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; 3538 3539 macsec_set_head_tail_room(dev); 3540 3541 if (is_zero_ether_addr(dev->dev_addr)) 3542 eth_hw_addr_inherit(dev, real_dev); 3543 if (is_zero_ether_addr(dev->broadcast)) 3544 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); 3545 3546 /* Get macsec's reference to real_dev */ 3547 netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL); 3548 3549 return 0; 3550 } 3551 3552 static void macsec_dev_uninit(struct net_device *dev) 3553 { 3554 struct macsec_dev *macsec = macsec_priv(dev); 3555 3556 gro_cells_destroy(&macsec->gro_cells); 3557 } 3558 3559 static netdev_features_t macsec_fix_features(struct net_device *dev, 3560 netdev_features_t features) 3561 { 3562 struct macsec_dev *macsec = macsec_priv(dev); 3563 struct net_device *real_dev = macsec->real_dev; 3564 3565 features &= (real_dev->features & MACSEC_FEATURES) | 3566 NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; 3567 3568 return features; 3569 } 3570 3571 static int macsec_dev_open(struct net_device *dev) 3572 { 3573 struct macsec_dev *macsec = macsec_priv(dev); 3574 struct net_device *real_dev = macsec->real_dev; 3575 int err; 3576 3577 err = dev_uc_add(real_dev, dev->dev_addr); 3578 if (err < 0) 3579 return err; 3580 3581 if (dev->flags & IFF_ALLMULTI) { 3582 err = dev_set_allmulti(real_dev, 1); 3583 if (err < 0) 3584 goto del_unicast; 3585 } 3586 3587 if (dev->flags & IFF_PROMISC) { 3588 err = dev_set_promiscuity(real_dev, 1); 3589 if (err < 0) 3590 goto clear_allmulti; 3591 } 3592 3593 /* If h/w offloading is available, propagate to the device */ 3594 if (macsec_is_offloaded(macsec)) { 3595 const struct macsec_ops *ops; 3596 struct macsec_context ctx; 3597 3598 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3599
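/* offload is enabled but no provider ops are available: fail the open rather than silently fall back to the software datapath */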
if (!ops) { 3600 err = -EOPNOTSUPP; 3601 goto clear_allmulti; 3602 } 3603 3604 ctx.secy = &macsec->secy; 3605 err = macsec_offload(ops->mdo_dev_open, &ctx); 3606 if (err) 3607 goto clear_allmulti; 3608 } 3609 3610 if (netif_carrier_ok(real_dev)) 3611 netif_carrier_on(dev); 3612 3613 return 0; 3614 clear_allmulti: 3615 if (dev->flags & IFF_ALLMULTI) 3616 dev_set_allmulti(real_dev, -1); 3617 del_unicast: 3618 dev_uc_del(real_dev, dev->dev_addr); 3619 netif_carrier_off(dev); 3620 return err; 3621 } 3622 3623 static int macsec_dev_stop(struct net_device *dev) 3624 { 3625 struct macsec_dev *macsec = macsec_priv(dev); 3626 struct net_device *real_dev = macsec->real_dev; 3627 3628 netif_carrier_off(dev); 3629 3630 /* If h/w offloading is available, propagate to the device */ 3631 if (macsec_is_offloaded(macsec)) { 3632 const struct macsec_ops *ops; 3633 struct macsec_context ctx; 3634 3635 ops = macsec_get_ops(macsec, &ctx); 3636 if (ops) { 3637 ctx.secy = &macsec->secy; 3638 macsec_offload(ops->mdo_dev_stop, &ctx); 3639 } 3640 } 3641 3642 dev_mc_unsync(real_dev, dev); 3643 dev_uc_unsync(real_dev, dev); 3644 3645 if (dev->flags & IFF_ALLMULTI) 3646 dev_set_allmulti(real_dev, -1); 3647 3648 if (dev->flags & IFF_PROMISC) 3649 dev_set_promiscuity(real_dev, -1); 3650 3651 dev_uc_del(real_dev, dev->dev_addr); 3652 3653 return 0; 3654 } 3655 3656 static void macsec_dev_change_rx_flags(struct net_device *dev, int change) 3657 { 3658 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3659 3660 if (!(dev->flags & IFF_UP)) 3661 return; 3662 3663 if (change & IFF_ALLMULTI) 3664 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); 3665 3666 if (change & IFF_PROMISC) 3667 dev_set_promiscuity(real_dev, 3668 dev->flags & IFF_PROMISC ? 1 : -1); 3669 } 3670 3671 static void macsec_dev_set_rx_mode(struct net_device *dev) 3672 { 3673 struct net_device *real_dev = macsec_priv(dev)->real_dev; 3674 3675 dev_mc_sync(real_dev, dev); 3676 dev_uc_sync(real_dev, dev); 3677 } 3678 3679 static int macsec_set_mac_address(struct net_device *dev, void *p) 3680 { 3681 struct macsec_dev *macsec = macsec_priv(dev); 3682 struct net_device *real_dev = macsec->real_dev; 3683 struct sockaddr *addr = p; 3684 u8 old_addr[ETH_ALEN]; 3685 int err; 3686 3687 if (!is_valid_ether_addr(addr->sa_data)) 3688 return -EADDRNOTAVAIL; 3689 3690 if (dev->flags & IFF_UP) { 3691 err = dev_uc_add(real_dev, addr->sa_data); 3692 if (err < 0) 3693 return err; 3694 } 3695 3696 ether_addr_copy(old_addr, dev->dev_addr); 3697 eth_hw_addr_set(dev, addr->sa_data); 3698 3699 /* If h/w offloading is available, propagate to the device */ 3700 if (macsec_is_offloaded(macsec)) { 3701 const struct macsec_ops *ops; 3702 struct macsec_context ctx; 3703 3704 ops = macsec_get_ops(macsec, &ctx); 3705 if (!ops) { 3706 err = -EOPNOTSUPP; 3707 goto restore_old_addr; 3708 } 3709 3710 ctx.secy = &macsec->secy; 3711 err = macsec_offload(ops->mdo_upd_secy, &ctx); 3712 if (err) 3713 goto restore_old_addr; 3714 } 3715 3716 if (dev->flags & IFF_UP) 3717 dev_uc_del(real_dev, old_addr); 3718 3719 return 0; 3720 3721 restore_old_addr: 3722 if (dev->flags & IFF_UP) 3723 dev_uc_del(real_dev, addr->sa_data); 3724 3725 eth_hw_addr_set(dev, old_addr); 3726 3727 return err; 3728 } 3729 3730 static int macsec_change_mtu(struct net_device *dev, int new_mtu) 3731 { 3732 struct macsec_dev *macsec = macsec_priv(dev); 3733 unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); 3734 3735 if (macsec->real_dev->mtu - extra < new_mtu) 3736 return -ERANGE; 3737 3738 
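/* WRITE_ONCE() pairs with lockless READ_ONCE() readers of dev->mtu */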
WRITE_ONCE(dev->mtu, new_mtu); 3739 3740 return 0; 3741 } 3742 3743 static void macsec_get_stats64(struct net_device *dev, 3744 struct rtnl_link_stats64 *s) 3745 { 3746 if (!dev->tstats) 3747 return; 3748 3749 dev_fetch_sw_netstats(s, dev->tstats); 3750 3751 s->rx_dropped = DEV_STATS_READ(dev, rx_dropped); 3752 s->tx_dropped = DEV_STATS_READ(dev, tx_dropped); 3753 s->rx_errors = DEV_STATS_READ(dev, rx_errors); 3754 } 3755 3756 static int macsec_get_iflink(const struct net_device *dev) 3757 { 3758 return READ_ONCE(macsec_priv(dev)->real_dev->ifindex); 3759 } 3760 3761 static const struct net_device_ops macsec_netdev_ops = { 3762 .ndo_init = macsec_dev_init, 3763 .ndo_uninit = macsec_dev_uninit, 3764 .ndo_open = macsec_dev_open, 3765 .ndo_stop = macsec_dev_stop, 3766 .ndo_fix_features = macsec_fix_features, 3767 .ndo_change_mtu = macsec_change_mtu, 3768 .ndo_set_rx_mode = macsec_dev_set_rx_mode, 3769 .ndo_change_rx_flags = macsec_dev_change_rx_flags, 3770 .ndo_set_mac_address = macsec_set_mac_address, 3771 .ndo_start_xmit = macsec_start_xmit, 3772 .ndo_get_stats64 = macsec_get_stats64, 3773 .ndo_get_iflink = macsec_get_iflink, 3774 }; 3775 3776 static const struct device_type macsec_type = { 3777 .name = "macsec", 3778 }; 3779 3780 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { 3781 [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, 3782 [IFLA_MACSEC_PORT] = { .type = NLA_U16 }, 3783 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, 3784 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, 3785 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, 3786 [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, 3787 [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, 3788 [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, 3789 [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, 3790 [IFLA_MACSEC_ES] = { .type = NLA_U8 }, 3791 [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, 3792 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, 3793 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, 3794 [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 }, 3795 }; 3796 3797 static void macsec_free_netdev(struct net_device *dev) 3798 { 3799 struct macsec_dev *macsec = macsec_priv(dev); 3800 3801 dst_release(&macsec->secy.tx_sc.md_dst->dst); 3802 free_percpu(macsec->stats); 3803 free_percpu(macsec->secy.tx_sc.stats); 3804 3805 /* Get rid of the macsec's reference to real_dev */ 3806 netdev_put(macsec->real_dev, &macsec->dev_tracker); 3807 } 3808 3809 static void macsec_setup(struct net_device *dev) 3810 { 3811 ether_setup(dev); 3812 dev->min_mtu = 0; 3813 dev->max_mtu = ETH_MAX_MTU; 3814 dev->priv_flags |= IFF_NO_QUEUE; 3815 dev->netdev_ops = &macsec_netdev_ops; 3816 dev->needs_free_netdev = true; 3817 dev->priv_destructor = macsec_free_netdev; 3818 SET_NETDEV_DEVTYPE(dev, &macsec_type); 3819 3820 eth_zero_addr(dev->broadcast); 3821 } 3822 3823 static int macsec_changelink_common(struct net_device *dev, 3824 struct nlattr *data[]) 3825 { 3826 struct macsec_secy *secy; 3827 struct macsec_tx_sc *tx_sc; 3828 3829 secy = &macsec_priv(dev)->secy; 3830 tx_sc = &secy->tx_sc; 3831 3832 if (data[IFLA_MACSEC_ENCODING_SA]) { 3833 struct macsec_tx_sa *tx_sa; 3834 3835 tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); 3836 tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); 3837 3838 secy->operational = tx_sa && tx_sa->active; 3839 } 3840 3841 if (data[IFLA_MACSEC_ENCRYPT]) 3842 tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); 3843 3844 if (data[IFLA_MACSEC_PROTECT]) 3845 secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); 3846 3847 if 
(data[IFLA_MACSEC_INC_SCI]) 3848 tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 3849 3850 if (data[IFLA_MACSEC_ES]) 3851 tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); 3852 3853 if (data[IFLA_MACSEC_SCB]) 3854 tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); 3855 3856 if (data[IFLA_MACSEC_REPLAY_PROTECT]) 3857 secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); 3858 3859 if (data[IFLA_MACSEC_VALIDATION]) 3860 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); 3861 3862 if (data[IFLA_MACSEC_CIPHER_SUITE]) { 3863 switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) { 3864 case MACSEC_CIPHER_ID_GCM_AES_128: 3865 case MACSEC_DEFAULT_CIPHER_ID: 3866 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3867 secy->xpn = false; 3868 break; 3869 case MACSEC_CIPHER_ID_GCM_AES_256: 3870 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3871 secy->xpn = false; 3872 break; 3873 case MACSEC_CIPHER_ID_GCM_AES_XPN_128: 3874 secy->key_len = MACSEC_GCM_AES_128_SAK_LEN; 3875 secy->xpn = true; 3876 break; 3877 case MACSEC_CIPHER_ID_GCM_AES_XPN_256: 3878 secy->key_len = MACSEC_GCM_AES_256_SAK_LEN; 3879 secy->xpn = true; 3880 break; 3881 default: 3882 return -EINVAL; 3883 } 3884 } 3885 3886 if (data[IFLA_MACSEC_WINDOW]) { 3887 secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); 3888 3889 /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window 3890 * for XPN cipher suites */ 3891 if (secy->xpn && 3892 secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW) 3893 return -EINVAL; 3894 } 3895 3896 return 0; 3897 } 3898 3899 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], 3900 struct nlattr *data[], 3901 struct netlink_ext_ack *extack) 3902 { 3903 struct macsec_dev *macsec = macsec_priv(dev); 3904 bool macsec_offload_state_change = false; 3905 enum macsec_offload offload; 3906 struct macsec_tx_sc tx_sc; 3907 struct macsec_secy secy; 3908 int ret; 3909 3910 if (!data) 3911 return 0; 3912 3913 if (data[IFLA_MACSEC_CIPHER_SUITE] || 3914 data[IFLA_MACSEC_ICV_LEN] || 3915 data[IFLA_MACSEC_SCI] || 3916 data[IFLA_MACSEC_PORT]) 3917 return -EINVAL; 3918 3919 /* Keep a copy of unmodified secy and tx_sc, in case the offload 3920 * propagation fails, to revert macsec_changelink_common. 
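* These are shallow struct copies: macsec_changelink_common() only changes scalar fields, so restoring the copies is a complete revert.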
3921 */ 3922 memcpy(&secy, &macsec->secy, sizeof(secy)); 3923 memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc)); 3924 3925 ret = macsec_changelink_common(dev, data); 3926 if (ret) 3927 goto cleanup; 3928 3929 if (data[IFLA_MACSEC_OFFLOAD]) { 3930 offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]); 3931 if (macsec->offload != offload) { 3932 macsec_offload_state_change = true; 3933 ret = macsec_update_offload(dev, offload); 3934 if (ret) 3935 goto cleanup; 3936 } 3937 } 3938 3939 /* If h/w offloading is available, propagate to the device */ 3940 if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) { 3941 const struct macsec_ops *ops; 3942 struct macsec_context ctx; 3943 3944 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3945 if (!ops) { 3946 ret = -EOPNOTSUPP; 3947 goto cleanup; 3948 } 3949 3950 ctx.secy = &macsec->secy; 3951 ret = macsec_offload(ops->mdo_upd_secy, &ctx); 3952 if (ret) 3953 goto cleanup; 3954 } 3955 3956 return 0; 3957 3958 cleanup: 3959 memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc)); 3960 memcpy(&macsec->secy, &secy, sizeof(secy)); 3961 3962 return ret; 3963 } 3964 3965 static void macsec_del_dev(struct macsec_dev *macsec) 3966 { 3967 int i; 3968 3969 while (macsec->secy.rx_sc) { 3970 struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); 3971 3972 rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); 3973 free_rx_sc(rx_sc); 3974 } 3975 3976 for (i = 0; i < MACSEC_NUM_AN; i++) { 3977 struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); 3978 3979 if (sa) { 3980 RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); 3981 clear_tx_sa(sa); 3982 } 3983 } 3984 } 3985 3986 static void macsec_common_dellink(struct net_device *dev, struct list_head *head) 3987 { 3988 struct macsec_dev *macsec = macsec_priv(dev); 3989 struct net_device *real_dev = macsec->real_dev; 3990 3991 /* If h/w offloading is available, propagate to the device */ 3992 if (macsec_is_offloaded(macsec)) { 3993 const struct macsec_ops *ops; 3994 struct macsec_context ctx; 3995 3996 ops = macsec_get_ops(netdev_priv(dev), &ctx); 3997 if (ops) { 3998 ctx.secy = &macsec->secy; 3999 macsec_offload(ops->mdo_del_secy, &ctx); 4000 } 4001 } 4002 4003 unregister_netdevice_queue(dev, head); 4004 list_del_rcu(&macsec->secys); 4005 macsec_del_dev(macsec); 4006 netdev_upper_dev_unlink(real_dev, dev); 4007 4008 macsec_generation++; 4009 } 4010 4011 static void macsec_dellink(struct net_device *dev, struct list_head *head) 4012 { 4013 struct macsec_dev *macsec = macsec_priv(dev); 4014 struct net_device *real_dev = macsec->real_dev; 4015 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 4016 4017 macsec_common_dellink(dev, head); 4018 4019 if (list_empty(&rxd->secys)) { 4020 netdev_rx_handler_unregister(real_dev); 4021 kfree(rxd); 4022 } 4023 } 4024 4025 static int register_macsec_dev(struct net_device *real_dev, 4026 struct net_device *dev) 4027 { 4028 struct macsec_dev *macsec = macsec_priv(dev); 4029 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 4030 4031 if (!rxd) { 4032 int err; 4033 4034 rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); 4035 if (!rxd) 4036 return -ENOMEM; 4037 4038 INIT_LIST_HEAD(&rxd->secys); 4039 4040 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 4041 rxd); 4042 if (err < 0) { 4043 kfree(rxd); 4044 return err; 4045 } 4046 } 4047 4048 list_add_tail_rcu(&macsec->secys, &rxd->secys); 4049 return 0; 4050 } 4051 4052 static bool sci_exists(struct net_device *dev, sci_t sci) 4053 { 4054 struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); 4055 
struct macsec_dev *macsec; 4056 4057 list_for_each_entry(macsec, &rxd->secys, secys) { 4058 if (macsec->secy.sci == sci) 4059 return true; 4060 } 4061 4062 return false; 4063 } 4064 4065 static sci_t dev_to_sci(struct net_device *dev, __be16 port) 4066 { 4067 return make_sci(dev->dev_addr, port); 4068 } 4069 4070 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) 4071 { 4072 struct macsec_dev *macsec = macsec_priv(dev); 4073 struct macsec_secy *secy = &macsec->secy; 4074 4075 macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); 4076 if (!macsec->stats) 4077 return -ENOMEM; 4078 4079 secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); 4080 if (!secy->tx_sc.stats) 4081 return -ENOMEM; 4082 4083 secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); 4084 if (!secy->tx_sc.md_dst) 4085 /* macsec and secy percpu stats will be freed when unregistering 4086 * net_device in macsec_free_netdev() 4087 */ 4088 return -ENOMEM; 4089 4090 if (sci == MACSEC_UNDEF_SCI) 4091 sci = dev_to_sci(dev, MACSEC_PORT_ES); 4092 4093 secy->netdev = dev; 4094 secy->operational = true; 4095 secy->key_len = DEFAULT_SAK_LEN; 4096 secy->icv_len = icv_len; 4097 secy->validate_frames = MACSEC_VALIDATE_DEFAULT; 4098 secy->protect_frames = true; 4099 secy->replay_protect = false; 4100 secy->xpn = DEFAULT_XPN; 4101 4102 secy->sci = sci; 4103 secy->tx_sc.md_dst->u.macsec_info.sci = sci; 4104 secy->tx_sc.active = true; 4105 secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; 4106 secy->tx_sc.encrypt = DEFAULT_ENCRYPT; 4107 secy->tx_sc.send_sci = DEFAULT_SEND_SCI; 4108 secy->tx_sc.end_station = false; 4109 secy->tx_sc.scb = false; 4110 4111 return 0; 4112 } 4113 4114 static struct lock_class_key macsec_netdev_addr_lock_key; 4115 4116 static int macsec_newlink(struct net *net, struct net_device *dev, 4117 struct nlattr *tb[], struct nlattr *data[], 4118 struct netlink_ext_ack *extack) 4119 { 4120 struct macsec_dev *macsec = macsec_priv(dev); 4121 rx_handler_func_t *rx_handler; 4122 u8 icv_len = MACSEC_DEFAULT_ICV_LEN; 4123 struct net_device *real_dev; 4124 int err, mtu; 4125 sci_t sci; 4126 4127 if (!tb[IFLA_LINK]) 4128 return -EINVAL; 4129 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); 4130 if (!real_dev) 4131 return -ENODEV; 4132 if (real_dev->type != ARPHRD_ETHER) 4133 return -EINVAL; 4134 4135 dev->priv_flags |= IFF_MACSEC; 4136 4137 macsec->real_dev = real_dev; 4138 4139 if (data && data[IFLA_MACSEC_OFFLOAD]) 4140 macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]); 4141 else 4142 /* MACsec offloading is off by default */ 4143 macsec->offload = MACSEC_OFFLOAD_OFF; 4144 4145 /* Check if the offloading mode is supported by the underlying layers */ 4146 if (macsec->offload != MACSEC_OFFLOAD_OFF && 4147 !macsec_check_offload(macsec->offload, macsec)) 4148 return -EOPNOTSUPP; 4149 4150 /* send_sci must remain enabled when the SCI to transmit is set explicitly */ 4151 if (data && data[IFLA_MACSEC_SCI] && 4152 data[IFLA_MACSEC_INC_SCI]) { 4153 u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); 4154 4155 if (!send_sci) 4156 return -EINVAL; 4157 } 4158 4159 if (data && data[IFLA_MACSEC_ICV_LEN]) 4160 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 4161 mtu = real_dev->mtu - icv_len - macsec_extra_len(true); 4162 if (mtu < 0) 4163 dev->mtu = 0; 4164 else 4165 dev->mtu = mtu; 4166 4167 rx_handler = rtnl_dereference(real_dev->rx_handler); 4168 if (rx_handler && rx_handler != macsec_handle_frame) 4169 return -EBUSY; 4170 4171 err =
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	rx_handler_func_t *rx_handler;
	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
	struct net_device *real_dev;
	int err, mtu;
	sci_t sci;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_ETHER)
		return -EINVAL;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_OFFLOAD])
		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
	else
		/* MACsec offloading is off by default */
		macsec->offload = MACSEC_OFFLOAD_OFF;

	/* Check if the offloading mode is supported by the underlying layers */
	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(macsec->offload, macsec))
		return -EOPNOTSUPP;

	/* send_sci must be set to true when the transmit SCI is explicitly set */
	if ((data && data[IFLA_MACSEC_SCI]) &&
	    (data && data[IFLA_MACSEC_INC_SCI])) {
		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

		if (!send_sci)
			return -EINVAL;
	}

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
	if (mtu < 0)
		dev->mtu = 0;
	else
		dev->mtu = mtu;

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	netdev_lockdep_set_classes(dev);
	lockdep_set_class(&dev->addr_list_lock,
			  &macsec_netdev_addr_lock_key);

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			err = macsec_offload(ops->mdo_add_secy, &ctx);
			if (err)
				goto del_dev;

			macsec->insert_tx_tag =
				macsec_needs_tx_tag(macsec, ops);
		}
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}
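
/* Validate attributes before macsec_newlink() touches any state: probe
 * non-default ICV lengths against the crypto layer with a throwaway tfm,
 * restrict the cipher suite to the supported GCM-AES variants, bound the
 * ICV length, and reject mutually exclusive TCI flag combinations
 * (INC_SCI, ES, SCB).
 */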
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

struct net_device *macsec_get_real_dev(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev;
}
EXPORT_SYMBOL_GPL(macsec_get_real_dev);

bool macsec_netdev_is_offloaded(struct net_device *dev)
{
	return macsec_is_offloaded(macsec_priv(dev));
}
EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded);

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
		0;
}
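
/* Report the SecY configuration to userspace. The cipher suite ID is
 * reconstructed from the SAK length and the XPN flag; a 128-bit SAK
 * without XPN is reported as MACSEC_DEFAULT_CIPHER_ID. The replay window
 * is only included when replay protection is enabled.
 */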
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_tx_sc *tx_sc;
	struct macsec_dev *macsec;
	struct macsec_secy *secy;
	u64 csid;

	macsec = macsec_priv(dev);
	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
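
/* Module init: register the netdevice notifier, the rtnl_link ops and the
 * generic netlink family, unwinding in reverse order if any step fails.
 */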
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");
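
/* For reference, a minimal software-MACsec setup driven through the
 * rtnetlink/genetlink interfaces implemented above, using the ip(8)
 * macsec support. All values below are illustrative placeholders, not
 * defaults:
 *
 *   ip link add link eth0 macsec0 type macsec port 11 encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <16-byte hex key>
 *   ip macsec add macsec0 rx port 11 address <peer MAC>
 *   ip macsec add macsec0 rx port 11 address <peer MAC> sa 0 pn 1 on \
 *           key 00 <16-byte hex key>
 *   ip link set macsec0 up
 */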