// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @dev_tracker: refcount tracker for @real_dev reference
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 * @insert_tx_tag: when offloading, the device requires an additional
 *	tag to be inserted
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	netdevice_tracker dev_tracker;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
	bool insert_tx_tag;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}
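/* Per-packet crypto state is stashed in skb->cb while an asynchronous
 * AEAD operation is in flight: the request itself, the SA the packet
 * uses, and a few flags the completion handlers need. The BUILD_BUG_ON
 * in macsec_skb_cb() below guarantees this fits in the 48-byte cb area.
 */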
struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
{
	struct macsec_rx_sa *sa = NULL;
	int an;

	for (an = 0; an < MACSEC_NUM_AN; an++) {
		sa = macsec_rxsa_get(rx_sc->sa[an]);
		if (sa)
			break;
	}
	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW ((1 << 30) - 1)

static sci_t make_sci(const u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}
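/* Illustrative example: a station with MAC address 52:54:00:12:34:56
 * using the End Station port (MACSEC_PORT_ES, port number 1) gets the
 * 8-byte SCI 52-54-00-12-34-56-00-01: the MAC address followed by the
 * port number in network byte order, exactly as make_sci() lays it out.
 */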
static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}
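/* Illustrative example: for an encrypting SecY that includes the SCI,
 * with encoding_sa = 1, macsec_fill_sectag() produces tci_an = 0x2d
 * (MACSEC_TCI_SC | MACSEC_TCI_CONFID | AN 1), and 30 bytes of secure
 * data set short_length = 30 since that is below MIN_NON_SHORT_LEN.
 */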
/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}
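/* Illustrative overhead arithmetic: with the SCI present, the MACsec
 * header is macsec_hdr_len(true) = ETH_HLEN + 6 + 8 = 28 bytes, and
 * macsec_extra_len(true) = 16 bytes are added on top of an untagged
 * frame (the SecTAG plus the 0x88E5 EtherType), not counting the ICV.
 * A 1000-byte skb with a 16-byte ICV thus carries a 956-byte MSDU.
 */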
static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
		dev_sw_netstats_tx_add(dev, 1, len);
}

static void macsec_encrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
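/* macsec_alloc_req() packs everything the AEAD operation needs into a
 * single GFP_ATOMIC allocation: the aead_request (plus the transform's
 * private reqsize), the 12-byte GCM IV, and a scatterlist array sized
 * for the skb's fragments, aligned for struct scatterlist. One kmalloc
 * per packet keeps the hot path cheap and the failure handling simple.
 */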
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_ensure_writable_head_tail(skb, dev);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = macsec_send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	macsec_skb_cb(skb)->has_sci = sci_present;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
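/* Replay window recap for macsec_post_decrypt() below: the lowest
 * acceptable PN is next_pn - replay_window. E.g. with next_pn = 1000 and
 * replay_window = 100, a packet with PN below 900 is dropped and counted
 * as InPktsLate when replay protection is on; with replay protection off
 * it is still delivered but counted as InPktsDelayed.
 */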
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_dropped);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		unsigned int msdu_len = macsec_msdu_len(skb);
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
		else
			rxsc_stats->stats.InOctetsValidated += msdu_len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			this_cpu_inc(rx_sa->stats->InPktsNotValid);
			DEV_STATS_INC(secy->netdev, rx_errors);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		// Instead of "pn >=" - to support pn overflow in xpn
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	dev_sw_netstats_rx_add(dev, len);
}

static void macsec_decrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, macsec->secy.netdev);

	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
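/* XPN PN recovery example for macsec_decrypt() below: only the low 32
 * bits of the 64-bit PN travel in the SecTAG. If the SA expects
 * next_pn = 0x00000001fffffff0 and the header carries 0x00000005, the
 * lower half has wrapped (the two values sit in different 2^31 halves
 * and the header value is smaller), so the upper half is incremented and
 * the full PN is recovered as 0x0000000200000005.
 */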
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
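/* When the SecTAG is stripped by offloading hardware, the driver can
 * attach a METADATA_MACSEC dst carrying the SCI of the receive channel.
 * handle_not_macsec() uses that hint to map an already-decrypted frame
 * to the right MACsec port; without it, delivery falls back to matching
 * the destination MAC address.
 */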
static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct metadata_dst *md_dst;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);
	md_dst = skb_metadata_dst(skb);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			struct macsec_rx_sc *rx_sc = NULL;

			if (md_dst && md_dst->type == METADATA_MACSEC)
				rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);

			if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
				continue;

			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				if (ether_addr_equal_64bits(hdr->h_dest,
							    ndev->broadcast))
					nskb->pkt_type = PACKET_BROADCAST;
				else
					nskb->pkt_type = PACKET_MULTICAST;

				__netif_rx(nskb);
			} else if (rx_sc || ndev->flags & IFF_PROMISC) {
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			}

			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}
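/* RX path for tagged frames: validate the SecTAG, find the SecY/RX SC by
 * SCI, pick the RX SA from the AN bits, do a cheap replay pre-check,
 * decrypt/verify, then run the full 802.1AE receive checks in
 * macsec_post_decrypt() before delivering to the controlled port.
 */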
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_errors);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(secy->netdev, rx_errors);
			if (active_rx_sa)
				this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		if (active_rx_sa)
			this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
		goto deliver;
	}
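	/* The check below is only a pre-filter: it runs before the frame
	 * is authenticated, so macsec_post_decrypt() repeats it under the
	 * SA lock once the ICV has been verified. For XPN, only the low
	 * 32 PN bits are on the wire, so lateness is only meaningful when
	 * both values sit in the same 2^31 half.
	 */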
	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		DEV_STATS_INC(macsec->secy.netdev, rx_dropped);

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_errors);
			continue;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = __netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
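/* All software SAs use the gcm(aes) AEAD: a 16-byte key for
 * GCM-AES-128(-XPN), a 32-byte key for GCM-AES-256(-XPN), with the ICV
 * length configured as the authentication tag size (16 bytes by
 * default).
 */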
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
					 bool active)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = active;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
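/* Each SA owns its own AEAD transform, so rekeying one association never
 * touches another. SAs start out inactive with a single reference; the
 * refcount pairs with macsec_txsa_put()/macsec_rxsa_put(), and the
 * actual free happens via call_rcu() once all readers are done.
 */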
static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u64)value);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}
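/* The get_*_from_nl() helpers use rtnl_dereference(), so the caller must
 * hold RTNL; every genl doit below takes rtnl_lock() before calling them
 * and drops it on all return paths.
 */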
static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	ret = (*func)(ctx);

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}
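/* The netlink policies above only enforce attribute types and lengths;
 * the validate_*() helpers add the semantic checks: an AN below
 * MACSEC_NUM_AN, a non-zero initial PN, and boolean attributes limited
 * to 0 or 1.
 */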
static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	macsec_rxsa_put(rx_sa);
	rtnl_unlock();
	return err;
}
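/* Offload propagation pattern used throughout the configuration paths:
 * build a macsec_context, hand it to the device's mdo_* hook via
 * macsec_offload(), and wipe any key material copied into the context
 * with memzero_explicit() regardless of the result. A failing hook rolls
 * the software state back.
 */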
static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool active = true;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rx_sc = create_rx_sc(dev, sci, active);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	del_rx_sc(secy, sci);
	free_rx_sc(rx_sc);
	rtnl_unlock();
	return ret;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
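/* Unlike an RX SA, a TX SA must be created with an explicit initial PN
 * (validate_add_txsa() requires MACSEC_SA_ATTR_PN): next_pn seeds the
 * transmit counter, and installing the encoding SA as active flips the
 * SecY back to operational.
 */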
static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	macsec_txsa_put(tx_sa);
	rtnl_unlock();
	return err;
}
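/* Deletion rules shared by the del paths below: an SA still marked
 * active is refused with -EBUSY, and the offloading device is notified
 * first so that a failing hook leaves the software state intact.
 */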
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}
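/* Note: an SA must be deactivated before deletion, otherwise the handler
 * above returns -EBUSY. A hedged ip-macsec(8) sequence (device/address
 * are illustrative):
 *
 *   ip macsec set macsec0 rx port 1 address c6:19:52:8f:e6:a0 sa 0 off
 *   ip macsec del macsec0 rx port 1 address c6:19:52:8f:e6:a0 sa 0
 */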
static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;
		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID] ||
	    attrs[MACSEC_SA_ATTR_SSCI] ||
	    attrs[MACSEC_SA_ATTR_SALT])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
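/* validate_upd_sa() above rejects KEY, KEYID, SSCI and SALT: an existing
 * SA cannot be re-keyed in place. Rekeying installs a fresh SA under
 * another AN and switches over. Only the PN and the active flag may be
 * updated, e.g. with ip-macsec(8) (values illustrative):
 *
 *   ip macsec set macsec0 tx sa 0 pn 1024 on
 */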
static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational, was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		spin_lock_bh(&tx_sa->lock);
		prev_pn = tx_sa->next_pn_halves;
		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	was_active = tx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.sa.update_pn = !!prev_pn.full64;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&tx_sa->lock);
	}
	tx_sa->active = was_active;
	secy->operational = was_operational;
	rtnl_unlock();
	return ret;
}
static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		spin_lock_bh(&rx_sa->lock);
		prev_pn = rx_sa->next_pn_halves;
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	was_active = rx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.sa.update_pn = !!prev_pn.full64;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();
	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&rx_sa->lock);
	}
	rx_sa->active = was_active;
	rtnl_unlock();
	return ret;
}
static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	unsigned int prev_n_rx_sc;
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	prev_n_rx_sc = secy->n_rx_sc;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	secy->n_rx_sc = prev_n_rx_sc;
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

static bool macsec_is_configured(struct macsec_dev *macsec)
{
	struct macsec_secy *secy = &macsec->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	int i;

	if (secy->rx_sc)
		return true;

	for (i = 0; i < MACSEC_NUM_AN; i++)
		if (tx_sc->sa[i])
			return true;

	return false;
}

static bool macsec_needs_tx_tag(struct macsec_dev *macsec,
				const struct macsec_ops *ops)
{
	return macsec->offload == MACSEC_OFFLOAD_PHY &&
		ops->mdo_insert_tx_tag;
}

static void macsec_set_head_tail_room(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int needed_headroom, needed_tailroom;
	const struct macsec_ops *ops;

	ops = macsec_get_ops(macsec, NULL);
	if (ops) {
		needed_headroom = ops->needed_headroom;
		needed_tailroom = ops->needed_tailroom;
	} else {
		needed_headroom = MACSEC_NEEDED_HEADROOM;
		needed_tailroom = MACSEC_NEEDED_TAILROOM;
	}

	dev->needed_headroom = real_dev->needed_headroom + needed_headroom;
	dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom;
}
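/* Usage sketch (not part of the driver): the offload provider is selected
 * with ip-macsec(8). Switching fails while the device is up or already
 * has SCs/SAs configured, since rules are not migrated (see the checks in
 * macsec_update_offload() below):
 *
 *   ip macsec offload macsec0 mac
 */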
static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
{
	enum macsec_offload prev_offload;
	const struct macsec_ops *ops;
	struct macsec_context ctx;
	struct macsec_dev *macsec;
	int ret = 0;

	macsec = macsec_priv(dev);

	/* Check if the offloading mode is supported by the underlying layers */
	if (offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(offload, macsec))
		return -EOPNOTSUPP;

	/* Check if the net device is busy. */
	if (netif_running(dev))
		return -EBUSY;

	/* Check if the device already has rules configured: we do not support
	 * rules migration.
	 */
	if (macsec_is_configured(macsec))
		return -EBUSY;

	prev_offload = macsec->offload;

	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
			       macsec, &ctx);
	if (!ops)
		return -EOPNOTSUPP;

	macsec->offload = offload;

	ctx.secy = &macsec->secy;
	ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
					    : macsec_offload(ops->mdo_add_secy, &ctx);
	if (ret) {
		macsec->offload = prev_offload;
		return ret;
	}

	macsec_set_head_tail_room(dev);
	macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops);

	return ret;
}

static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
	struct nlattr **attrs = info->attrs;
	enum macsec_offload offload;
	struct macsec_dev *macsec;
	struct net_device *dev;
	int ret = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (!attrs[MACSEC_ATTR_OFFLOAD])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
					attrs[MACSEC_ATTR_OFFLOAD],
					macsec_genl_offload_policy, NULL))
		return -EINVAL;

	rtnl_lock();

	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	macsec = macsec_priv(dev);

	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
		ret = -EINVAL;
		goto out;
	}

	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);

	if (macsec->offload != offload)
		ret = macsec_update_offload(dev, offload);
out:
	rtnl_unlock();
	return ret;
}

static void get_tx_sa_stats(struct net_device *dev, int an,
			    struct macsec_tx_sa *tx_sa,
			    struct macsec_tx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.tx_sa = tx_sa;
			ctx.stats.tx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats =
			per_cpu_ptr(tx_sa->stats, cpu);

		sum->OutPktsProtected += stats->OutPktsProtected;
		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
	}
}

static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
{
	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
			sum->OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			sum->OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}
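/* These per-SA sums (and the SC/SecY counterparts below) are what
 * "ip -s macsec show" prints. When offloading, the counters come from the
 * provider via the mdo_get_*_stats callbacks instead of the software
 * percpu statistics.
 */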
static void get_rx_sa_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc, int an,
			    struct macsec_rx_sa *rx_sa,
			    struct macsec_rx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.rx_sa = rx_sa;
			ctx.stats.rx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			ctx.rx_sc = rx_sc;
			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats =
			per_cpu_ptr(rx_sa->stats, cpu);

		sum->InPktsOK += stats->InPktsOK;
		sum->InPktsInvalid += stats->InPktsInvalid;
		sum->InPktsNotValid += stats->InPktsNotValid;
		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum->InPktsUnusedSA += stats->InPktsUnusedSA;
	}
}

static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats *sum)
{
	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
			sum->InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
			sum->InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			sum->InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
			sum->InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}

static void get_rx_sc_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc,
			    struct macsec_rx_sc_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.rx_sc_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			ctx.rx_sc = rx_sc;
			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(rx_sc->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		sum->InOctetsValidated += tmp.InOctetsValidated;
		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum->InPktsUnchecked += tmp.InPktsUnchecked;
		sum->InPktsDelayed += tmp.InPktsDelayed;
		sum->InPktsOK += tmp.InPktsOK;
		sum->InPktsInvalid += tmp.InPktsInvalid;
		sum->InPktsLate += tmp.InPktsLate;
		sum->InPktsNotValid += tmp.InPktsNotValid;
		sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
	}
}
static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum->InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum->InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum->InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum->InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum->InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum->InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum->InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum->InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum->InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum->InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static void get_tx_sc_stats(struct net_device *dev,
			    struct macsec_tx_sc_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.tx_sc_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		sum->OutPktsProtected += tmp.OutPktsProtected;
		sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum->OutOctetsProtected += tmp.OutOctetsProtected;
		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}
}

static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum->OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum->OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum->OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum->OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.dev_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_dev_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		sum->OutPktsUntagged += tmp.OutPktsUntagged;
		sum->InPktsUntagged += tmp.InPktsUntagged;
		sum->OutPktsTooLong += tmp.OutPktsTooLong;
		sum->InPktsNoTag += tmp.InPktsNoTag;
		sum->InPktsBadTag += tmp.InPktsBadTag;
		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum->InPktsNoSCI += tmp.InPktsNoSCI;
		sum->InPktsOverrun += tmp.InPktsOverrun;
	}
}
static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum->OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum->InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum->OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum->InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum->InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum->InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum->InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum->InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
							 MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
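/* The key_len/xpn pair above (also used by macsec_fill_info()) maps to the
 * cipher suite ID reported to userspace:
 *
 *   16-byte SAK, !xpn -> MACSEC_DEFAULT_CIPHER_ID (GCM-AES-128)
 *   16-byte SAK,  xpn -> MACSEC_CIPHER_ID_GCM_AES_XPN_128
 *   32-byte SAK, !xpn -> MACSEC_CIPHER_ID_GCM_AES_256
 *   32-byte SAK,  xpn -> MACSEC_CIPHER_ID_GCM_AES_XPN_256
 */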
static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
	  struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_tx_sc_stats tx_sc_stats = {0, };
	struct macsec_tx_sa_stats tx_sa_stats = {0, };
	struct macsec_rx_sc_stats rx_sc_stats = {0, };
	struct macsec_rx_sa_stats rx_sa_stats = {0, };
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_dev_stats dev_stats = {0, };
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *attr;
	void *hdr;
	int i, j;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
	if (!attr)
		goto nla_put_failure;
	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
		goto nla_put_failure;
	nla_nest_end(skb, attr);

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;

	get_tx_sc_stats(dev, &tx_sc_stats);
	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	get_secy_stats(dev, &dev_stats);
	if (copy_secy_stats(skb, &dev_stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;
		u64 pn;
		int pn_len;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start_noflag(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		if (secy->xpn) {
			pn = tx_sa->next_pn;
			pn_len = MACSEC_XPN_PN_LEN;
		} else {
			pn = tx_sa->next_pn_halves.lower;
			pn_len = MACSEC_DEFAULT_PN_LEN;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start_noflag(skb,
						  MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;
			u64 pn;
			int pn_len;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start_noflag(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start_noflag(skb,
						     MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (secy->xpn) {
				pn = rx_sa->next_pn;
				pn_len = MACSEC_XPN_PN_LEN;
			} else {
				pn = rx_sa->next_pn_halves.lower;
				pn_len = MACSEC_DEFAULT_PN_LEN;
			}

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}
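/* "ip macsec show" walks this dump: one MACSEC_CMD_GET_TXSC message per
 * SecY. The dump is restartable via cb->args[0] and is kept consistent
 * against concurrent configuration changes through the macsec_generation
 * sequence number checked by genl_dump_check_consistent().
 */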
static const struct genl_small_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = macsec_dump_txsc,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_OFFLOAD,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_offload,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family macsec_fam __ro_after_init = {
	.name = MACSEC_GENL_NAME,
	.hdrsize = 0,
	.version = MACSEC_GENL_VERSION,
	.maxattr = MACSEC_ATTR_MAX,
	.policy = macsec_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.small_ops = macsec_genl_ops,
	.n_small_ops = ARRAY_SIZE(macsec_genl_ops),
	.resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1,
};

static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	const struct macsec_ops *ops;
	struct phy_device *phydev;
	struct macsec_context ctx;
	int skb_final_len;
	int err;

	ops = macsec_get_ops(macsec, &ctx);
	skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom +
		ops->needed_tailroom;
	if (unlikely(skb_final_len > macsec->real_dev->mtu)) {
		err = -EINVAL;
		goto cleanup;
	}

	phydev = macsec->real_dev->phydev;

	err = skb_ensure_writable_head_tail(skb, dev);
	if (unlikely(err < 0))
		goto cleanup;

	err = ops->mdo_insert_tx_tag(phydev, skb);
	if (unlikely(err))
		goto cleanup;

	return skb;
cleanup:
	kfree_skb(skb);
	return ERR_PTR(err);
}
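/* On the offloaded TX path below, the metadata dst attached to the skb
 * carries the SecY's SCI (set up in macsec_add_dev()), which lets the
 * offloading driver map each packet back to its MACsec configuration.
 */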
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	if (macsec_is_offloaded(netdev_priv(dev))) {
		struct metadata_dst *md_dst = secy->tx_sc.md_dst;

		skb_dst_drop(skb);
		dst_hold(&md_dst->dst);
		skb_dst_set(skb, &md_dst->dst);

		if (macsec->insert_tx_tag) {
			skb = macsec_insert_tx_tag(skb, dev);
			if (IS_ERR(skb)) {
				DEV_STATS_INC(dev, tx_dropped);
				return NETDEV_TX_OK;
			}
		}

		skb->dev = macsec->real_dev;
		return dev_queue_xmit(skb);
	}

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		DEV_STATS_INC(dev, tx_dropped);
		return NETDEV_TX_OK;
	}

	len = skb->len;
	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			DEV_STATS_INC(dev, tx_dropped);
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	macsec_set_head_tail_room(dev);

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	/* Get macsec's reference to real_dev */
	netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}
static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto clear_allmulti;
		}

		ctx.secy = &macsec->secy;
		err = macsec_offload(ops->mdo_dev_open, &ctx);
		if (err)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_dev_stop, &ctx);
		}
	}

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}
static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	u8 old_addr[ETH_ALEN];
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (dev->flags & IFF_UP) {
		err = dev_uc_add(real_dev, addr->sa_data);
		if (err < 0)
			return err;
	}

	ether_addr_copy(old_addr, dev->dev_addr);
	eth_hw_addr_set(dev, addr->sa_data);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto restore_old_addr;
		}

		ctx.secy = &macsec->secy;
		err = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (err)
			goto restore_old_addr;
	}

	if (dev->flags & IFF_UP)
		dev_uc_del(real_dev, old_addr);

	return 0;

restore_old_addr:
	if (dev->flags & IFF_UP)
		dev_uc_del(real_dev, addr->sa_data);

	eth_hw_addr_set(dev, old_addr);

	return err;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}
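/* Worked example for the MTU bound above: with the default GCM-AES-128
 * cipher (16-byte ICV) and a SecTAG that carries the SCI, the per-packet
 * overhead is macsec_extra_len(true) + icv_len = 16 + 16 = 32 bytes, so a
 * 1500-byte parent MTU leaves at most 1468 bytes for the macsec device.
 */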
static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	if (!dev->tstats)
		return;

	dev_fetch_sw_netstats(s, dev->tstats);

	s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
	s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
	s->rx_errors = DEV_STATS_READ(dev, rx_errors);
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init = macsec_dev_init,
	.ndo_uninit = macsec_dev_uninit,
	.ndo_open = macsec_dev_open,
	.ndo_stop = macsec_dev_stop,
	.ndo_fix_features = macsec_fix_features,
	.ndo_change_mtu = macsec_change_mtu,
	.ndo_set_rx_mode = macsec_dev_set_rx_mode,
	.ndo_change_rx_flags = macsec_dev_change_rx_flags,
	.ndo_set_mac_address = macsec_set_mac_address,
	.ndo_start_xmit = macsec_start_xmit,
	.ndo_get_stats64 = macsec_get_stats64,
	.ndo_get_iflink = macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
	[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	if (macsec->secy.tx_sc.md_dst)
		metadata_dst_free(macsec->secy.tx_sc.md_dst);
	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	/* Get rid of the macsec's reference to real_dev */
	netdev_put(macsec->real_dev, &macsec->dev_tracker);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = true;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (data[IFLA_MACSEC_WINDOW]) {
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

		/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
		 * for XPN cipher suites
		 */
		if (secy->xpn &&
		    secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
			return -EINVAL;
	}

	return 0;
}
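/* Usage sketch (not part of the driver): most of these attributes can be
 * changed after creation through rtnetlink, e.g. with iproute2 (syntax as
 * in ip-link(8), shown here as an assumption):
 *
 *   ip link set macsec0 type macsec encrypt off
 *
 * Cipher suite, ICV length, SCI and port are rejected on changelink; see
 * macsec_changelink() below.
 */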
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	bool macsec_offload_state_change = false;
	enum macsec_offload offload;
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case the offload
	 * propagation fails, to revert macsec_changelink_common.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));

	ret = macsec_changelink_common(dev, data);
	if (ret)
		goto cleanup;

	if (data[IFLA_MACSEC_OFFLOAD]) {
		offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
		if (macsec->offload != offload) {
			macsec_offload_state_change = true;
			ret = macsec_update_offload(dev, offload);
			if (ret)
				goto cleanup;
		}
	}

	/* If h/w offloading is available, propagate to the device */
	if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}
static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats)
		return -ENOMEM;

	secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
	if (!secy->tx_sc.md_dst)
		/* macsec and secy percpu stats will be freed when unregistering
		 * net_device in macsec_free_netdev()
		 */
		return -ENOMEM;

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;
	secy->xpn = DEFAULT_XPN;

	secy->sci = sci;
	secy->tx_sc.md_dst->u.macsec_info.sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static struct lock_class_key macsec_netdev_addr_lock_key;
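/* Usage sketch (not part of the driver): creating a macsec device on top
 * of an Ethernet interface with iproute2. When no SCI or port is given,
 * the SCI defaults to dev_to_sci(dev, MACSEC_PORT_ES), i.e. the device's
 * MAC address combined with port 1:
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 */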

static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	rx_handler_func_t *rx_handler;
	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
	struct net_device *real_dev;
	int err, mtu;
	sci_t sci;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_ETHER)
		return -EINVAL;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_OFFLOAD])
		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
	else
		/* MACsec offloading is off by default */
		macsec->offload = MACSEC_OFFLOAD_OFF;

	/* Check if the offloading mode is supported by the underlying layers */
	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(macsec->offload, macsec))
		return -EOPNOTSUPP;

	/* send_sci must be set to true when transmit sci explicitly is set */
	if ((data && data[IFLA_MACSEC_SCI]) &&
	    (data && data[IFLA_MACSEC_INC_SCI])) {
		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

		if (!send_sci)
			return -EINVAL;
	}

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
	if (mtu < 0)
		dev->mtu = 0;
	else
		dev->mtu = mtu;

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	netdev_lockdep_set_classes(dev);
	lockdep_set_class(&dev->addr_list_lock,
			  &macsec_netdev_addr_lock_key);

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			err = macsec_offload(ops->mdo_add_secy, &ctx);
			if (err)
				goto del_dev;

			macsec->insert_tx_tag =
				macsec_needs_tx_tag(macsec, ops);
		}
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}
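
/* The TCI flag validation below rejects SecTAG combinations that IEEE
 * 802.1AE does not allow to be encoded: an explicitly included SCI
 * together with the ES or SCB bit, or ES together with SCB.  Working
 * through (sci && (scb || es)) || (scb && es), exactly four combinations
 * pass: none of the three set, only INC_SCI, only ES, or only SCB.
 */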

static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

struct net_device *macsec_get_real_dev(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev;
}
EXPORT_SYMBOL_GPL(macsec_get_real_dev);

bool macsec_netdev_is_offloaded(struct net_device *dev)
{
	return macsec_is_offloaded(macsec_priv(dev));
}
EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded);

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
		0;
}
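
/* macsec_get_size() above only needs to be an upper bound on what
 * macsec_fill_info() below emits: IFLA_MACSEC_WINDOW is reserved
 * unconditionally but only filled in when replay protection is enabled,
 * while every other attribute is always present.  Keeping the two lists
 * in the same order makes it easy to check that they stay in sync.
 */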

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_tx_sc *tx_sc;
	struct macsec_dev *macsec;
	struct macsec_secy *secy;
	u64 csid;

	macsec = macsec_priv(dev);
	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
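
/* Module init/exit register three independent hooks; the error labels in
 * macsec_init() unwind them in reverse order of registration:
 *
 *   netdevice notifier  ->  rtnl link ops ("macsec")  ->  genl family
 *
 * macsec_exit() tears the same three down, then uses rcu_barrier() to
 * wait for in-flight RCU callbacks before the module text goes away.
 */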

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");
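
/* A minimal end-to-end setup from userspace, for reference (illustrative
 * only; interface names, the peer SCI and the key material are
 * placeholders, and keys must match the cipher's SAK length):
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <16-byte-hex-key>
 *   ip macsec add macsec0 rx sci <peer-sci>
 *   ip macsec add macsec0 rx sci <peer-sci> sa 0 pn 1 on key 02 <16-byte-hex-key>
 *   ip link set macsec0 up
 */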