// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>

#include <uapi/linux/if_macsec.h>

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,
	    unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8  unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

struct macsec_dev_stats {
	__u64 OutPktsUntagged;
	__u64 InPktsUntagged;
	__u64 OutPktsTooLong;
	__u64 InPktsNoTag;
	__u64 InPktsBadTag;
	__u64 InPktsUnknownSCI;
	__u64 InPktsNoSCI;
	__u64 InPktsOverrun;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: GRO cells for batching delivery on the receive path
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}
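
/* Editor's note, for orientation (not part of the original source): on the
 * wire, struct macsec_eth_header above places the SecTAG directly after the
 * MAC addresses:
 *
 *   dst MAC | src MAC | EtherType 0x88E5 | TCI/AN | SL | PN (32 bits) | [SCI]
 *
 * The TCI bits are the MACSEC_TCI_* flags, the low two bits of the same
 * octet carry the association number (MACSEC_AN_MASK), and the 8-byte SCI
 * is only present when MACSEC_TCI_SC is set.
 */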

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0

static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}
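
/* Worked example (editor's illustration): make_sci() simply concatenates
 * the MAC address and the port number. For address 52:54:00:12:34:56 and
 * port 1 (MACSEC_PORT_ES, already in network byte order), the resulting
 * 64-bit SCI is the byte sequence 52:54:00:12:34:56:00:01.
 */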

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
	}

	return macsec->real_dev->phydev->macsec_ops;
}
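
/* Note: only PHY-level offload is wired up in this version. Both
 * macsec_is_offloaded() and macsec_check_offload() above treat anything
 * other than MACSEC_OFFLOAD_PHY as "not offloaded", which is why
 * __macsec_get_ops() can unconditionally return the phydev's macsec_ops
 * once the check has passed.
 */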
378 */ 379 static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec, 380 struct macsec_context *ctx) 381 { 382 if (!macsec_check_offload(macsec->offload, macsec)) 383 return NULL; 384 385 return __macsec_get_ops(macsec->offload, macsec, ctx); 386 } 387 388 /* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */ 389 static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn) 390 { 391 struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data; 392 int len = skb->len - 2 * ETH_ALEN; 393 int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len; 394 395 /* a) It comprises at least 17 octets */ 396 if (skb->len <= 16) 397 return false; 398 399 /* b) MACsec EtherType: already checked */ 400 401 /* c) V bit is clear */ 402 if (h->tci_an & MACSEC_TCI_VERSION) 403 return false; 404 405 /* d) ES or SCB => !SC */ 406 if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) && 407 (h->tci_an & MACSEC_TCI_SC)) 408 return false; 409 410 /* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */ 411 if (h->unused) 412 return false; 413 414 /* rx.pn != 0 if not XPN (figure 10-5 with 802.11AEbw-2013 amendment) */ 415 if (!h->packet_number && !xpn) 416 return false; 417 418 /* length check, f) g) h) i) */ 419 if (h->short_length) 420 return len == extra_len + h->short_length; 421 return len >= extra_len + MIN_NON_SHORT_LEN; 422 } 423 424 #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true)) 425 #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN 426 427 static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn, 428 salt_t salt) 429 { 430 struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv; 431 432 gcm_iv->ssci = ssci ^ salt.ssci; 433 gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn; 434 } 435 436 static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn) 437 { 438 struct gcm_iv *gcm_iv = (struct gcm_iv *)iv; 439 440 gcm_iv->sci = sci; 441 gcm_iv->pn = htonl(pn); 442 } 443 444 static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb) 445 { 446 return (struct macsec_eth_header *)skb_mac_header(skb); 447 } 448 449 static sci_t dev_to_sci(struct net_device *dev, __be16 port) 450 { 451 return make_sci(dev->dev_addr, port); 452 } 453 454 static void __macsec_pn_wrapped(struct macsec_secy *secy, 455 struct macsec_tx_sa *tx_sa) 456 { 457 pr_debug("PN wrapped, transitioning to !oper\n"); 458 tx_sa->active = false; 459 if (secy->protect_frames) 460 secy->operational = false; 461 } 462 463 void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa) 464 { 465 spin_lock_bh(&tx_sa->lock); 466 __macsec_pn_wrapped(secy, tx_sa); 467 spin_unlock_bh(&tx_sa->lock); 468 } 469 EXPORT_SYMBOL_GPL(macsec_pn_wrapped); 470 471 static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa, 472 struct macsec_secy *secy) 473 { 474 pn_t pn; 475 476 spin_lock_bh(&tx_sa->lock); 477 478 pn = tx_sa->next_pn_halves; 479 if (secy->xpn) 480 tx_sa->next_pn++; 481 else 482 tx_sa->next_pn_halves.lower++; 483 484 if (tx_sa->next_pn == 0) 485 __macsec_pn_wrapped(secy, tx_sa); 486 spin_unlock_bh(&tx_sa->lock); 487 488 return pn; 489 } 490 491 static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev) 492 { 493 struct macsec_dev *macsec = netdev_priv(dev); 494 495 skb->dev = macsec->real_dev; 496 skb_reset_mac_header(skb); 497 skb->protocol = eth_hdr(skb)->h_proto; 498 } 499 500 static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc, 501 struct macsec_tx_sa *tx_sa) 502 { 503 struct pcpu_tx_sc_stats *txsc_stats = 

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += skb->len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += skb->len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_encrypt_finish(skb, dev);
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
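
/* Note on macsec_alloc_req() above: the AEAD request, the 12-byte GCM IV
 * and the scatterlist array are all carved out of a single kmalloc() so the
 * atomic TX/RX paths pay for exactly one allocation. Layout, in order:
 *
 *   [aead_request + crypto_aead_reqsize(tfm)][IV][pad][sg[0..num_frags-1]]
 *
 * with the sg array aligned up to __alignof__(struct scatterlist).
 */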

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}
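
/* On the crypto setup in macsec_encrypt() above: with encryption enabled,
 * the Ethernet header plus SecTAG (macsec_hdr_len()) are passed as
 * associated data and only the payload is encrypted; in integrity-only
 * mode the cryptlen is 0 and everything up to the ICV is associated data,
 * so the whole frame is authenticated but transmitted in the clear.
 */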

static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += skb->len;
		else
			rxsc_stats->stats.InOctetsValidated += skb->len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		// Instead of "pn >=" - to support pn overflow in xpn
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, macsec->secy.netdev);

	len = skb->len;
	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
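
/* Replay-window arithmetic used in macsec_post_decrypt() above, as a small
 * worked example (editor's illustration): with next_pn_halves.lower == 1000
 * and replay_window == 64, any PN below lowest_pn == 936 is counted as
 * InPktsLate and dropped when replay protection is on. For XPN the extra
 * pn_same_half() test keeps a PN that wrapped into the next 2^32 half from
 * being misread as "late".
 */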

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
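
/* find_rx_sc() and find_rx_sc_rtnl() walk the same SC list but under
 * different protection: the former relies on the BH-disabled RCU read side
 * of the receive path, the latter on the RTNL lock held by the netlink
 * control operations. Callers must pick the variant matching the context
 * they run in.
 */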

static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	/* 10.6 If the management control validateFrames is not
	 * Strict, frames without a SecTAG are received, counted, and
	 * delivered to the Controlled Port
	 */
	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		if (!macsec_is_offloaded(macsec) &&
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = macsec->secy.netdev;

		if (netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}

		if (netif_running(macsec->secy.netdev) &&
		    macsec_is_offloaded(macsec)) {
			ret = RX_HANDLER_EXACT;
			goto out;
		}
	}

out:
	rcu_read_unlock();
	return ret;
}

static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}
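
	/* A single real device can host several SecYs; the SCI carried in
	 * (or implied by) the SecTAG selects which SecY and RX SC this
	 * frame belongs to. No match here means no known secure channel.
	 */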

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	/* cache the length: gro_cells_receive() consumes the skb */
	len = skb->len;
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		macsec->secy.netdev->stats.rx_dropped++;

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			macsec->secy.netdev->stats.rx_dropped++;
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}
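
/* del_rx_sc() above uses the classic RCU singly-linked-list walk with a
 * pointer-to-pointer cursor: rx_scp always points at the link that refers
 * to rx_sc, so unlinking is one rcu_assign_pointer(*rx_scp, rx_sc->next)
 * with no special case for the list head. Concurrent readers see either
 * the old or the new list, never a torn one.
 */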

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = true;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u32)value);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}
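
/* sci_t and ssci_t are declared __bitwise in <net/macsec.h>, so the
 * nla_{get,put}_{sci,ssci} helpers above need __force casts to move them
 * in and out of plain integer netlink attributes without sparse warnings;
 * the byte order is preserved because the values are only ever copied.
 */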

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};
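
/* The offload dispatch below runs the same driver callback in two phases:
 * a "prepare" pass that may fail and must leave no state behind, then a
 * "commit" pass that is not allowed to fail. A minimal driver-side sketch
 * of the contract (hypothetical helpers, for illustration only):
 *
 *	static int mdo_add_rxsa(struct macsec_context *ctx)
 *	{
 *		if (ctx->prepare)
 *			return can_program_sa(ctx) ? 0 : -EOPNOTSUPP;
 *		program_sa(ctx);	// must not fail at this point
 *		return 0;
 *	}
 */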

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	/* Phase I: prepare. The driver should fail here if there are going
	 * to be issues in the commit phase.
	 */
	ctx->prepare = true;
	ret = (*func)(ctx);
	if (ret)
		goto phy_unlock;

	/* Phase II: commit. This step cannot fail. */
	ctx->prepare = false;
	ret = (*func)(ctx);
	/* This should never happen: commit is not allowed to fail */
	if (unlikely(ret))
		WARN(1, "MACsec offloading commit failed (%d)\n", ret);

phy_unlock:
	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
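
/* PN attribute sizing: MACSEC_SA_ATTR_PN is declared NLA_MIN_LEN with
 * .len = 4, so userspace sends 4 bytes for a 32-bit PN or 8 bytes when the
 * SecY uses an XPN cipher suite. The add/update handlers below compare
 * nla_len() against MACSEC_DEFAULT_PN_LEN or MACSEC_XPN_PN_LEN accordingly
 * before reading the value.
 */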

static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		if (err)
			goto cleanup;
	}

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	kfree(rx_sa);
	rtnl_unlock();
	return err;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}
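
/* Ordering note for the SA add handlers (macsec_add_rxsa() above and
 * macsec_add_txsa() further below): the new SA is fully initialized and,
 * where needed, pushed to the offloading driver before it is published
 * with rcu_assign_pointer(). On any failure the object was never visible
 * to the data path, so the cleanup label can kfree() it directly without
 * waiting for an RCU grace period.
 */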

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = create_rx_sc(dev, sci);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}
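
/* Unlike RX SAs, which default to next_pn = 1 in init_rx_sa(), a TX SA
 * starts from a caller-chosen PN: validate_add_txsa() above makes
 * MACSEC_SA_ATTR_PN mandatory and rejects a zero PN outright.
 */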

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		if (err)
			goto cleanup;
	}

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	kfree(tx_sa);
	rtnl_unlock();
	return err;
}
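
/* secy->operational is kept in sync with the encoding SA: macsec_add_txsa()
 * above and macsec_upd_txsa() further below raise or lower it when the SA
 * selected by tx_sc->encoding_sa changes state, and __macsec_pn_wrapped()
 * clears it when the PN space of a protecting SA is exhausted.
 */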

static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;

		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;

		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}
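
/* Both delete handlers above refuse to remove an SA that is still marked
 * active (-EBUSY): userspace must deactivate the SA first, which lets
 * in-flight users drain through the refcount/RCU scheme before the final
 * macsec_rxsa_put()/macsec_txsa_put() frees the key material.
 */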

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational, was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		spin_lock_bh(&tx_sa->lock);
		prev_pn = tx_sa->next_pn_halves;
		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	was_active = tx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;

		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&tx_sa->lock);
	}
	tx_sa->active = was_active;
	secy->operational = was_operational;
	rtnl_unlock();
	return ret;
}
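
/* The RX SA update below mirrors macsec_upd_txsa(), minus the
 * encoding-SA/operational bookkeeping, which only matters on transmit.
 */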

static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		spin_lock_bh(&rx_sa->lock);
		prev_pn = rx_sa->next_pn_halves;
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	was_active = rx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;

		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();
	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&rx_sa->lock);
	}
	rx_sa->active = was_active;
	rtnl_unlock();
	return ret;
}

static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	unsigned int prev_n_rx_sc;
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	prev_n_rx_sc = secy->n_rx_sc;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;

		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	secy->n_rx_sc = prev_n_rx_sc;
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}
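
/* macsec_upd_rxsc() above adjusts secy->n_rx_sc whenever an RX SC
 * flips its active state; macsec_is_configured() below relies on the
 * same counter to decide whether any rules exist when toggling offload.
 */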

static bool macsec_is_configured(struct macsec_dev *macsec)
{
	struct macsec_secy *secy = &macsec->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	int i;

	if (secy->n_rx_sc > 0)
		return true;

	for (i = 0; i < MACSEC_NUM_AN; i++)
		if (tx_sc->sa[i])
			return true;

	return false;
}

static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
	enum macsec_offload offload, prev_offload;
	int (*func)(struct macsec_context *ctx);
	struct nlattr **attrs = info->attrs;
	struct net_device *dev, *loop_dev;
	const struct macsec_ops *ops;
	struct macsec_context ctx;
	struct macsec_dev *macsec;
	struct net *loop_net;
	int ret = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (!attrs[MACSEC_ATTR_OFFLOAD])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
					attrs[MACSEC_ATTR_OFFLOAD],
					macsec_genl_offload_policy, NULL))
		return -EINVAL;

	rtnl_lock();

	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	macsec = macsec_priv(dev);

	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
	if (macsec->offload == offload)
		goto out;

	/* Check if the offloading mode is supported by the underlying layers */
	if (offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(offload, macsec)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (offload == MACSEC_OFFLOAD_OFF)
		goto skip_limitation;

	/* Check the physical interface isn't offloading another interface
	 * first.
	 */
	for_each_net(loop_net) {
		for_each_netdev(loop_net, loop_dev) {
			struct macsec_dev *priv;

			if (!netif_is_macsec(loop_dev))
				continue;

			priv = macsec_priv(loop_dev);

			if (priv->real_dev == macsec->real_dev &&
			    priv->offload != MACSEC_OFFLOAD_OFF) {
				ret = -EBUSY;
				goto out;
			}
		}
	}

skip_limitation:
	/* Check if the net device is busy. */
	if (netif_running(dev)) {
		ret = -EBUSY;
		goto out;
	}

	prev_offload = macsec->offload;
	macsec->offload = offload;

	/* Check if the device already has rules configured: we do not support
	 * rules migration.
	 */
	if (macsec_is_configured(macsec)) {
		ret = -EBUSY;
		goto rollback;
	}

	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
			       macsec, &ctx);
	if (!ops) {
		ret = -EOPNOTSUPP;
		goto rollback;
	}

	if (prev_offload == MACSEC_OFFLOAD_OFF)
		func = ops->mdo_add_secy;
	else
		func = ops->mdo_del_secy;

	ctx.secy = &macsec->secy;
	ret = macsec_offload(func, &ctx);
	if (ret)
		goto rollback;

	rtnl_unlock();
	return 0;

rollback:
	macsec->offload = prev_offload;
out:
	rtnl_unlock();
	return ret;
}
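
/* Stats helpers: every counter set is kept per-CPU and summed at dump
 * time.  The 32-bit SA counters are read directly; the 64-bit SC and
 * SecY counters go through the u64_stats seqcount retry loop so a
 * reader on a 32-bit host cannot observe a torn 64-bit value.
 */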

static int copy_tx_sa_stats(struct sk_buff *skb,
			    struct macsec_tx_sa_stats __percpu *pstats)
{
	struct macsec_tx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.OutPktsProtected += stats->OutPktsProtected;
		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_rx_sa_stats(struct sk_buff *skb,
		 struct macsec_rx_sa_stats __percpu *pstats)
{
	struct macsec_rx_sa_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);

		sum.InPktsOK += stats->InPktsOK;
		sum.InPktsInvalid += stats->InPktsInvalid;
		sum.InPktsNotValid += stats->InPktsNotValid;
		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
	}

	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
{
	struct macsec_rx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.InOctetsValidated += tmp.InOctetsValidated;
		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum.InPktsUnchecked += tmp.InPktsUnchecked;
		sum.InPktsDelayed += tmp.InPktsDelayed;
		sum.InPktsOK += tmp.InPktsOK;
		sum.InPktsInvalid += tmp.InPktsInvalid;
		sum.InPktsLate += tmp.InPktsLate;
		sum.InPktsNotValid += tmp.InPktsNotValid;
		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
	}

	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum.InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum.InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum.InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum.InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum.InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum.InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum.InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum.InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum.InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum.InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
{
	struct macsec_tx_sc_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_tx_sc_stats *stats;
		struct macsec_tx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsProtected += tmp.OutPktsProtected;
		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
		sum.OutOctetsProtected += tmp.OutOctetsProtected;
		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
	}

	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum.OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum.OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum.OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum.OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int
copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
{
	struct macsec_dev_stats sum = {0, };
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(pstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		sum.OutPktsUntagged += tmp.OutPktsUntagged;
		sum.InPktsUntagged += tmp.InPktsUntagged;
		sum.OutPktsTooLong += tmp.OutPktsTooLong;
		sum.InPktsNoTag += tmp.InPktsNoTag;
		sum.InPktsBadTag += tmp.InPktsBadTag;
		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum.InPktsNoSCI += tmp.InPktsNoSCI;
		sum.InPktsOverrun += tmp.InPktsOverrun;
	}

	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum.OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum.InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum.OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum.InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum.InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum.InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum.InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum.InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}
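
/* The cipher suite ID reported to userspace is reconstructed from the
 * SAK length plus the XPN flag; together they uniquely identify which
 * of the four supported GCM-AES suites is in use.
 */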

static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
							 MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}
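
/* dump_secy() emits one netlink message per SecY as a tree of nested
 * attributes: device-level info and stats, then the TXSA list, then an
 * RXSC list which itself nests per-SC stats and an RXSA list.  Every
 * partially written nest is cancelled on failure so the skb is left
 * consistent for the next dump pass.
 */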

static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
	  struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *attr;
	void *hdr;
	int i, j;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
	if (!attr)
		goto nla_put_failure;
	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
		goto nla_put_failure;
	nla_nest_end(skb, attr);

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;
		u64 pn;
		int pn_len;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start_noflag(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		if (secy->xpn) {
			pn = tx_sa->next_pn;
			pn_len = MACSEC_XPN_PN_LEN;
		} else {
			pn = tx_sa->next_pn_halves.lower;
			pn_len = MACSEC_DEFAULT_PN_LEN;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start_noflag(skb,
						  MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;
			u64 pn;
			int pn_len;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start_noflag(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start_noflag(skb,
						     MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (secy->xpn) {
				pn = rx_sa->next_pn;
				pn_len = MACSEC_XPN_PN_LEN;
			} else {
				pn = rx_sa->next_pn_halves.lower;
				pn_len = MACSEC_DEFAULT_PN_LEN;
			}

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}

static const struct genl_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = macsec_dump_txsc,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_OFFLOAD,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_offload,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family macsec_fam __ro_after_init = {
	.name = MACSEC_GENL_NAME,
	.hdrsize = 0,
	.version = MACSEC_GENL_VERSION,
	.maxattr = MACSEC_ATTR_MAX,
	.policy = macsec_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.ops = macsec_genl_ops,
	.n_ops = ARRAY_SIZE(macsec_genl_ops),
};
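
/* For reference, the genetlink ops above are normally exercised from
 * userspace with iproute2; illustrative (not exhaustive) invocations:
 *
 *   ip link add link eth0 macsec0 type macsec encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <128-bit hex key>
 *   ip macsec add macsec0 rx sci <sci>
 *   ip macsec add macsec0 rx sci <sci> sa 0 pn 1 on key 02 <key>
 *   ip macsec show
 */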

static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	if (macsec_is_offloaded(netdev_priv(dev))) {
		skb->dev = macsec->real_dev;
		return dev_queue_xmit(skb);
	}

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	len = skb->len;
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;

	dev->needed_headroom = real_dev->needed_headroom +
			       MACSEC_NEEDED_HEADROOM;
	dev->needed_tailroom = real_dev->needed_tailroom +
			       MACSEC_NEEDED_TAILROOM;

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
	free_percpu(dev->tstats);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
	features |= NETIF_F_LLTX;

	return features;
}

static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto clear_allmulti;
		}

		err = macsec_offload(ops->mdo_dev_open, &ctx);
		if (err)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops)
			macsec_offload(ops->mdo_dev_stop, &ctx);
	}

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}
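
/* Changing the MAC address also changes the SecY's SCI, which embeds
 * the MAC address plus a port number, so an offloaded device has to be
 * notified through mdo_upd_secy as well.
 */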

static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	err = dev_uc_add(real_dev, addr->sa_data);
	if (err < 0)
		return err;

	dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_upd_secy, &ctx);
		}
	}

	return 0;
}

static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev->ifindex;
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init		= macsec_dev_init,
	.ndo_uninit		= macsec_dev_uninit,
	.ndo_open		= macsec_dev_open,
	.ndo_stop		= macsec_dev_stop,
	.ndo_fix_features	= macsec_fix_features,
	.ndo_change_mtu		= macsec_change_mtu,
	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
	.ndo_set_mac_address	= macsec_set_mac_address,
	.ndo_start_xmit		= macsec_start_xmit,
	.ndo_get_stats64	= macsec_get_stats64,
	.ndo_get_iflink		= macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
};
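
/* priv_destructor: runs from free_netdev() and releases the per-CPU
 * stats allocated in macsec_add_dev().
 */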

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_WINDOW])
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = true;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = true;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
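
/* changelink keeps byte-wise copies of the SecY and its TX SC so that
 * a failed offload callback can restore the pre-change configuration:
 * macsec_changelink_common() has already modified the live structures
 * by the time the device gets notified.
 */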

static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case the offload
	 * propagation fails, to revert macsec_changelink_common.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));

	ret = macsec_changelink_common(dev, data);
	if (ret)
		return ret;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}
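
/* macsec_add_dev() fills in the SecY defaults for a newly created
 * interface; when called with MACSEC_UNDEF_SCI, the SCI is derived
 * from the device's MAC address and the end-station port.
 */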

static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;
	secy->xpn = DEFAULT_XPN;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev;
	int err;
	sci_t sci;
	u8 icv_len = DEFAULT_ICV_LEN;
	rx_handler_func_t *rx_handler;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	/* MACsec offloading is off by default */
	macsec->offload = MACSEC_OFFLOAD_OFF;

	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}
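
/* Link-level validation: the ICV length must be acceptable for the
 * selected cipher suite, the single-bit TCI flags (ES, SCB, include-SCI)
 * are mutually exclusive, and enabling replay protection requires an
 * explicit window.
 */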

static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};
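
/* A "macsec master" is a real device that has our rx_handler attached,
 * i.e. at least one MACsec interface is stacked on it; the notifier
 * below uses this to ignore events on unrelated devices.
 */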

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");