1 // SPDX-License-Identifier: GPL-2.0 2 /* NXP C45 PTP PHY driver interface 3 * Copyright 2023 NXP 4 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com> 5 */ 6 7 #include <linux/delay.h> 8 #include <linux/ethtool_netlink.h> 9 #include <linux/kernel.h> 10 #include <linux/mii.h> 11 #include <linux/module.h> 12 #include <linux/phy.h> 13 #include <linux/processor.h> 14 #include <net/dst_metadata.h> 15 #include <net/macsec.h> 16 17 #include "nxp-c45-tja11xx.h" 18 19 #define MACSEC_REG_SIZE 32 20 #define TX_SC_MAX 4 21 22 #define TX_SC_BIT(secy_id) BIT(MACSEC_REG_SIZE - (secy_id) - 1) 23 24 #define VEND1_MACSEC_BASE 0x9000 25 26 #define MACSEC_CFG 0x0000 27 #define MACSEC_CFG_BYPASS BIT(1) 28 #define MACSEC_CFG_S0I BIT(0) 29 30 #define MACSEC_TPNET 0x0044 31 #define PN_WRAP_THRESHOLD 0xffffffff 32 33 #define MACSEC_RXSCA 0x0080 34 #define MACSEC_RXSCKA 0x0084 35 36 #define MACSEC_TXSCA 0x00C0 37 #define MACSEC_TXSCKA 0x00C4 38 39 #define MACSEC_RXSC_SCI_1H 0x0100 40 41 #define MACSEC_RXSC_CFG 0x0128 42 #define MACSEC_RXSC_CFG_XPN BIT(25) 43 #define MACSEC_RXSC_CFG_AES_256 BIT(24) 44 #define MACSEC_RXSC_CFG_SCI_EN BIT(11) 45 #define MACSEC_RXSC_CFG_RP BIT(10) 46 #define MACSEC_RXSC_CFG_VF_MASK GENMASK(9, 8) 47 #define MACSEC_RXSC_CFG_VF_OFF 8 48 49 #define MACSEC_RPW 0x012C 50 51 #define MACSEC_RXSA_A_CS 0x0180 52 #define MACSEC_RXSA_A_NPN 0x0184 53 #define MACSEC_RXSA_A_XNPN 0x0188 54 #define MACSEC_RXSA_A_LNPN 0x018C 55 #define MACSEC_RXSA_A_LXNPN 0x0190 56 57 #define MACSEC_RXSA_B_CS 0x01C0 58 #define MACSEC_RXSA_B_NPN 0x01C4 59 #define MACSEC_RXSA_B_XNPN 0x01C8 60 #define MACSEC_RXSA_B_LNPN 0x01CC 61 #define MACSEC_RXSA_B_LXNPN 0x01D0 62 63 #define MACSEC_RXSA_CS_AN_OFF 1 64 #define MACSEC_RXSA_CS_EN BIT(0) 65 66 #define MACSEC_TXSC_SCI_1H 0x0200 67 #define MACSEC_TXSC_CFG 0x0228 68 #define MACSEC_TXSC_CFG_XPN BIT(25) 69 #define MACSEC_TXSC_CFG_AES_256 BIT(24) 70 #define MACSEC_TXSC_CFG_AN_MASK GENMASK(19, 18) 71 #define MACSEC_TXSC_CFG_AN_OFF 18 72 #define MACSEC_TXSC_CFG_ASA BIT(17) 73 #define MACSEC_TXSC_CFG_SCE BIT(16) 74 #define MACSEC_TXSC_CFG_ENCRYPT BIT(4) 75 #define MACSEC_TXSC_CFG_PROTECT BIT(3) 76 #define MACSEC_TXSC_CFG_SEND_SCI BIT(2) 77 #define MACSEC_TXSC_CFG_END_STATION BIT(1) 78 #define MACSEC_TXSC_CFG_SCB BIT(0) 79 80 #define MACSEC_TXSA_A_CS 0x0280 81 #define MACSEC_TXSA_A_NPN 0x0284 82 #define MACSEC_TXSA_A_XNPN 0x0288 83 84 #define MACSEC_TXSA_B_CS 0x02C0 85 #define MACSEC_TXSA_B_NPN 0x02C4 86 #define MACSEC_TXSA_B_XNPN 0x02C8 87 88 #define MACSEC_SA_CS_A BIT(31) 89 90 #define MACSEC_EVR 0x0400 91 #define MACSEC_EVER 0x0404 92 93 #define MACSEC_RXSA_A_KA 0x0700 94 #define MACSEC_RXSA_A_SSCI 0x0720 95 #define MACSEC_RXSA_A_SALT 0x0724 96 97 #define MACSEC_RXSA_B_KA 0x0740 98 #define MACSEC_RXSA_B_SSCI 0x0760 99 #define MACSEC_RXSA_B_SALT 0x0764 100 101 #define MACSEC_TXSA_A_KA 0x0780 102 #define MACSEC_TXSA_A_SSCI 0x07A0 103 #define MACSEC_TXSA_A_SALT 0x07A4 104 105 #define MACSEC_TXSA_B_KA 0x07C0 106 #define MACSEC_TXSA_B_SSCI 0x07E0 107 #define MACSEC_TXSA_B_SALT 0x07E4 108 109 #define MACSEC_UPFR0D2 0x0A08 110 #define MACSEC_UPFR0M1 0x0A10 111 #define MACSEC_OVP BIT(12) 112 113 #define MACSEC_UPFR0M2 0x0A14 114 #define ETYPE_MASK 0xffff 115 116 #define MACSEC_UPFR0R 0x0A18 117 #define MACSEC_UPFR_EN BIT(0) 118 119 #define ADPTR_CNTRL 0x0F00 120 #define ADPTR_CNTRL_CONFIG_EN BIT(14) 121 #define ADPTR_CNTRL_ADPTR_EN BIT(12) 122 #define ADPTR_TX_TAG_CNTRL 0x0F0C 123 #define ADPTR_TX_TAG_CNTRL_ENA BIT(31) 124 125 #define TX_SC_FLT_BASE 0x800 126 #define TX_SC_FLT_SIZE 0x10 127 #define TX_FLT_BASE(flt_id) (TX_SC_FLT_BASE + \ 128 TX_SC_FLT_SIZE * (flt_id)) 129 130 #define TX_SC_FLT_OFF_MAC_DA_SA 0x04 131 #define TX_SC_FLT_OFF_MAC_SA 0x08 132 #define TX_SC_FLT_OFF_MAC_CFG 0x0C 133 #define TX_SC_FLT_BY_SA BIT(14) 134 #define TX_SC_FLT_EN BIT(8) 135 136 #define TX_SC_FLT_MAC_DA_SA(base) ((base) + TX_SC_FLT_OFF_MAC_DA_SA) 137 #define TX_SC_FLT_MAC_SA(base) ((base) + TX_SC_FLT_OFF_MAC_SA) 138 #define TX_SC_FLT_MAC_CFG(base) ((base) + TX_SC_FLT_OFF_MAC_CFG) 139 140 #define ADAPTER_EN BIT(6) 141 #define MACSEC_EN BIT(5) 142 143 #define MACSEC_INOV1HS 0x0140 144 #define MACSEC_INOV2HS 0x0144 145 #define MACSEC_INOD1HS 0x0148 146 #define MACSEC_INOD2HS 0x014C 147 #define MACSEC_RXSCIPUS 0x0150 148 #define MACSEC_RXSCIPDS 0x0154 149 #define MACSEC_RXSCIPLS 0x0158 150 #define MACSEC_RXAN0INUSS 0x0160 151 #define MACSEC_RXAN0IPUSS 0x0170 152 #define MACSEC_RXSA_A_IPOS 0x0194 153 #define MACSEC_RXSA_A_IPIS 0x01B0 154 #define MACSEC_RXSA_A_IPNVS 0x01B4 155 #define MACSEC_RXSA_B_IPOS 0x01D4 156 #define MACSEC_RXSA_B_IPIS 0x01F0 157 #define MACSEC_RXSA_B_IPNVS 0x01F4 158 #define MACSEC_OPUS 0x021C 159 #define MACSEC_OPTLS 0x022C 160 #define MACSEC_OOP1HS 0x0240 161 #define MACSEC_OOP2HS 0x0244 162 #define MACSEC_OOE1HS 0x0248 163 #define MACSEC_OOE2HS 0x024C 164 #define MACSEC_TXSA_A_OPPS 0x028C 165 #define MACSEC_TXSA_A_OPES 0x0290 166 #define MACSEC_TXSA_B_OPPS 0x02CC 167 #define MACSEC_TXSA_B_OPES 0x02D0 168 #define MACSEC_INPWTS 0x0630 169 #define MACSEC_INPBTS 0x0638 170 #define MACSEC_IPSNFS 0x063C 171 172 #define TJA11XX_TLV_TX_NEEDED_HEADROOM (32) 173 #define TJA11XX_TLV_NEEDED_TAILROOM (0) 174 175 #define ETH_P_TJA11XX_TLV (0x4e58) 176 177 enum nxp_c45_sa_type { 178 TX_SA, 179 RX_SA, 180 }; 181 182 struct nxp_c45_sa { 183 void *sa; 184 const struct nxp_c45_sa_regs *regs; 185 enum nxp_c45_sa_type type; 186 bool is_key_a; 187 u8 an; 188 struct list_head list; 189 }; 190 191 struct nxp_c45_secy { 192 struct macsec_secy *secy; 193 struct macsec_rx_sc *rx_sc; 194 struct list_head sa_list; 195 int secy_id; 196 bool rx_sc0_impl; 197 struct list_head list; 198 }; 199 200 struct nxp_c45_macsec { 201 struct list_head secy_list; 202 DECLARE_BITMAP(secy_bitmap, TX_SC_MAX); 203 DECLARE_BITMAP(tx_sc_bitmap, TX_SC_MAX); 204 }; 205 206 struct nxp_c45_sa_regs { 207 u16 cs; 208 u16 npn; 209 u16 xnpn; 210 u16 lnpn; 211 u16 lxnpn; 212 u16 ka; 213 u16 ssci; 214 u16 salt; 215 u16 ipis; 216 u16 ipnvs; 217 u16 ipos; 218 u16 opps; 219 u16 opes; 220 }; 221 222 static const struct nxp_c45_sa_regs rx_sa_a_regs = { 223 .cs = MACSEC_RXSA_A_CS, 224 .npn = MACSEC_RXSA_A_NPN, 225 .xnpn = MACSEC_RXSA_A_XNPN, 226 .lnpn = MACSEC_RXSA_A_LNPN, 227 .lxnpn = MACSEC_RXSA_A_LXNPN, 228 .ka = MACSEC_RXSA_A_KA, 229 .ssci = MACSEC_RXSA_A_SSCI, 230 .salt = MACSEC_RXSA_A_SALT, 231 .ipis = MACSEC_RXSA_A_IPIS, 232 .ipnvs = MACSEC_RXSA_A_IPNVS, 233 .ipos = MACSEC_RXSA_A_IPOS, 234 }; 235 236 static const struct nxp_c45_sa_regs rx_sa_b_regs = { 237 .cs = MACSEC_RXSA_B_CS, 238 .npn = MACSEC_RXSA_B_NPN, 239 .xnpn = MACSEC_RXSA_B_XNPN, 240 .lnpn = MACSEC_RXSA_B_LNPN, 241 .lxnpn = MACSEC_RXSA_B_LXNPN, 242 .ka = MACSEC_RXSA_B_KA, 243 .ssci = MACSEC_RXSA_B_SSCI, 244 .salt = MACSEC_RXSA_B_SALT, 245 .ipis = MACSEC_RXSA_B_IPIS, 246 .ipnvs = MACSEC_RXSA_B_IPNVS, 247 .ipos = MACSEC_RXSA_B_IPOS, 248 }; 249 250 static const struct nxp_c45_sa_regs tx_sa_a_regs = { 251 .cs = MACSEC_TXSA_A_CS, 252 .npn = MACSEC_TXSA_A_NPN, 253 .xnpn = MACSEC_TXSA_A_XNPN, 254 .ka = MACSEC_TXSA_A_KA, 255 .ssci = MACSEC_TXSA_A_SSCI, 256 .salt = MACSEC_TXSA_A_SALT, 257 .opps = MACSEC_TXSA_A_OPPS, 258 .opes = MACSEC_TXSA_A_OPES, 259 }; 260 261 static const struct nxp_c45_sa_regs tx_sa_b_regs = { 262 .cs = MACSEC_TXSA_B_CS, 263 .npn = MACSEC_TXSA_B_NPN, 264 .xnpn = MACSEC_TXSA_B_XNPN, 265 .ka = MACSEC_TXSA_B_KA, 266 .ssci = MACSEC_TXSA_B_SSCI, 267 .salt = MACSEC_TXSA_B_SALT, 268 .opps = MACSEC_TXSA_B_OPPS, 269 .opes = MACSEC_TXSA_B_OPES, 270 }; 271 272 static const 273 struct nxp_c45_sa_regs *nxp_c45_sa_regs_get(enum nxp_c45_sa_type sa_type, 274 bool key_a) 275 { 276 if (sa_type == RX_SA) 277 if (key_a) 278 return &rx_sa_a_regs; 279 else 280 return &rx_sa_b_regs; 281 else if (sa_type == TX_SA) 282 if (key_a) 283 return &tx_sa_a_regs; 284 else 285 return &tx_sa_b_regs; 286 else 287 return NULL; 288 } 289 290 static int nxp_c45_macsec_write(struct phy_device *phydev, u16 addr, u32 value) 291 { 292 u32 lvalue = value; 293 u16 laddr; 294 int ret; 295 296 WARN_ON_ONCE(addr % 4); 297 298 phydev_dbg(phydev, "write addr 0x%x value 0x%x\n", addr, value); 299 300 laddr = VEND1_MACSEC_BASE + addr / 2; 301 ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, laddr, lvalue); 302 if (ret) 303 return ret; 304 305 laddr += 1; 306 lvalue >>= 16; 307 ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, laddr, lvalue); 308 309 return ret; 310 } 311 312 static int nxp_c45_macsec_read(struct phy_device *phydev, u16 addr, u32 *value) 313 { 314 u32 lvalue; 315 u16 laddr; 316 int ret; 317 318 WARN_ON_ONCE(addr % 4); 319 320 laddr = VEND1_MACSEC_BASE + addr / 2; 321 ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, laddr); 322 if (ret < 0) 323 return ret; 324 325 laddr += 1; 326 lvalue = (u32)ret & 0xffff; 327 ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, laddr); 328 if (ret < 0) 329 return ret; 330 331 lvalue |= (u32)ret << 16; 332 *value = lvalue; 333 334 phydev_dbg(phydev, "read addr 0x%x value 0x%x\n", addr, *value); 335 336 return 0; 337 } 338 339 static void nxp_c45_macsec_read32_64(struct phy_device *phydev, u16 addr, 340 u64 *value) 341 { 342 u32 lvalue; 343 344 nxp_c45_macsec_read(phydev, addr, &lvalue); 345 *value = lvalue; 346 } 347 348 static void nxp_c45_macsec_read64(struct phy_device *phydev, u16 addr, 349 u64 *value) 350 { 351 u32 lvalue; 352 353 nxp_c45_macsec_read(phydev, addr, &lvalue); 354 *value = (u64)lvalue << 32; 355 nxp_c45_macsec_read(phydev, addr + 4, &lvalue); 356 *value |= lvalue; 357 } 358 359 static void nxp_c45_secy_irq_en(struct phy_device *phydev, 360 struct nxp_c45_secy *phy_secy, bool en) 361 { 362 u32 reg; 363 364 nxp_c45_macsec_read(phydev, MACSEC_EVER, ®); 365 if (en) 366 reg |= TX_SC_BIT(phy_secy->secy_id); 367 else 368 reg &= ~TX_SC_BIT(phy_secy->secy_id); 369 nxp_c45_macsec_write(phydev, MACSEC_EVER, reg); 370 } 371 372 static struct nxp_c45_secy *nxp_c45_find_secy(struct list_head *secy_list, 373 sci_t sci) 374 { 375 struct nxp_c45_secy *pos, *tmp; 376 377 list_for_each_entry_safe(pos, tmp, secy_list, list) 378 if (pos->secy->sci == sci) 379 return pos; 380 381 return ERR_PTR(-EINVAL); 382 } 383 384 static struct 385 nxp_c45_secy *nxp_c45_find_secy_by_id(struct list_head *secy_list, 386 int id) 387 { 388 struct nxp_c45_secy *pos, *tmp; 389 390 list_for_each_entry_safe(pos, tmp, secy_list, list) 391 if (pos->secy_id == id) 392 return pos; 393 394 return ERR_PTR(-EINVAL); 395 } 396 397 static void nxp_c45_secy_free(struct nxp_c45_secy *phy_secy) 398 { 399 list_del(&phy_secy->list); 400 kfree(phy_secy); 401 } 402 403 static struct nxp_c45_sa *nxp_c45_find_sa(struct list_head *sa_list, 404 enum nxp_c45_sa_type sa_type, u8 an) 405 { 406 struct nxp_c45_sa *pos, *tmp; 407 408 list_for_each_entry_safe(pos, tmp, sa_list, list) 409 if (pos->an == an && pos->type == sa_type) 410 return pos; 411 412 return ERR_PTR(-EINVAL); 413 } 414 415 static struct nxp_c45_sa *nxp_c45_sa_alloc(struct list_head *sa_list, void *sa, 416 enum nxp_c45_sa_type sa_type, u8 an) 417 { 418 struct nxp_c45_sa *first = NULL, *pos, *tmp; 419 int occurrences = 0; 420 421 list_for_each_entry_safe(pos, tmp, sa_list, list) { 422 if (pos->type != sa_type) 423 continue; 424 425 if (pos->an == an) 426 return ERR_PTR(-EINVAL); 427 428 first = pos; 429 occurrences++; 430 if (occurrences >= 2) 431 return ERR_PTR(-ENOSPC); 432 } 433 434 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 435 if (!tmp) 436 return ERR_PTR(-ENOMEM); 437 438 if (first) 439 tmp->is_key_a = !first->is_key_a; 440 else 441 tmp->is_key_a = true; 442 443 tmp->sa = sa; 444 tmp->type = sa_type; 445 tmp->an = an; 446 tmp->regs = nxp_c45_sa_regs_get(tmp->type, tmp->is_key_a); 447 list_add_tail(&tmp->list, sa_list); 448 449 return tmp; 450 } 451 452 static void nxp_c45_sa_free(struct nxp_c45_sa *sa) 453 { 454 list_del(&sa->list); 455 kfree(sa); 456 } 457 458 static void nxp_c45_sa_list_free(struct list_head *sa_list) 459 { 460 struct nxp_c45_sa *pos, *tmp; 461 462 list_for_each_entry_safe(pos, tmp, sa_list, list) 463 nxp_c45_sa_free(pos); 464 } 465 466 static void nxp_c45_sa_set_pn(struct phy_device *phydev, 467 struct nxp_c45_sa *sa, u64 pn, 468 u32 replay_window) 469 { 470 const struct nxp_c45_sa_regs *sa_regs = sa->regs; 471 pn_t npn = {.full64 = pn}; 472 pn_t lnpn; 473 474 nxp_c45_macsec_write(phydev, sa_regs->npn, npn.lower); 475 nxp_c45_macsec_write(phydev, sa_regs->xnpn, npn.upper); 476 if (sa->type != RX_SA) 477 return; 478 479 if (pn > replay_window) 480 lnpn.full64 = pn - replay_window; 481 else 482 lnpn.full64 = 1; 483 484 nxp_c45_macsec_write(phydev, sa_regs->lnpn, lnpn.lower); 485 nxp_c45_macsec_write(phydev, sa_regs->lxnpn, lnpn.upper); 486 } 487 488 static void nxp_c45_sa_set_key(struct macsec_context *ctx, 489 const struct nxp_c45_sa_regs *sa_regs, 490 u8 *salt, ssci_t ssci) 491 { 492 struct phy_device *phydev = ctx->phydev; 493 u32 key_size = ctx->secy->key_len / 4; 494 u32 salt_size = MACSEC_SALT_LEN / 4; 495 u32 *key_u32 = (u32 *)ctx->sa.key; 496 u32 *salt_u32 = (u32 *)salt; 497 u32 reg, value; 498 int i; 499 500 for (i = 0; i < key_size; i++) { 501 reg = sa_regs->ka + i * 4; 502 value = (__force u32)cpu_to_be32(key_u32[i]); 503 nxp_c45_macsec_write(phydev, reg, value); 504 } 505 506 if (ctx->secy->xpn) { 507 for (i = 0; i < salt_size; i++) { 508 reg = sa_regs->salt + (2 - i) * 4; 509 value = (__force u32)cpu_to_be32(salt_u32[i]); 510 nxp_c45_macsec_write(phydev, reg, value); 511 } 512 513 value = (__force u32)cpu_to_be32((__force u32)ssci); 514 nxp_c45_macsec_write(phydev, sa_regs->ssci, value); 515 } 516 517 nxp_c45_macsec_write(phydev, sa_regs->cs, MACSEC_SA_CS_A); 518 } 519 520 static void nxp_c45_rx_sa_clear_stats(struct phy_device *phydev, 521 struct nxp_c45_sa *sa) 522 { 523 nxp_c45_macsec_write(phydev, sa->regs->ipis, 0); 524 nxp_c45_macsec_write(phydev, sa->regs->ipnvs, 0); 525 nxp_c45_macsec_write(phydev, sa->regs->ipos, 0); 526 527 nxp_c45_macsec_write(phydev, MACSEC_RXAN0INUSS + sa->an * 4, 0); 528 nxp_c45_macsec_write(phydev, MACSEC_RXAN0IPUSS + sa->an * 4, 0); 529 } 530 531 static void nxp_c45_rx_sa_read_stats(struct phy_device *phydev, 532 struct nxp_c45_sa *sa, 533 struct macsec_rx_sa_stats *stats) 534 { 535 nxp_c45_macsec_read(phydev, sa->regs->ipis, &stats->InPktsInvalid); 536 nxp_c45_macsec_read(phydev, sa->regs->ipnvs, &stats->InPktsNotValid); 537 nxp_c45_macsec_read(phydev, sa->regs->ipos, &stats->InPktsOK); 538 } 539 540 static void nxp_c45_tx_sa_clear_stats(struct phy_device *phydev, 541 struct nxp_c45_sa *sa) 542 { 543 nxp_c45_macsec_write(phydev, sa->regs->opps, 0); 544 nxp_c45_macsec_write(phydev, sa->regs->opes, 0); 545 } 546 547 static void nxp_c45_tx_sa_read_stats(struct phy_device *phydev, 548 struct nxp_c45_sa *sa, 549 struct macsec_tx_sa_stats *stats) 550 { 551 nxp_c45_macsec_read(phydev, sa->regs->opps, &stats->OutPktsProtected); 552 nxp_c45_macsec_read(phydev, sa->regs->opes, &stats->OutPktsEncrypted); 553 } 554 555 static void nxp_c45_rx_sa_update(struct phy_device *phydev, 556 struct nxp_c45_sa *sa, bool en) 557 { 558 const struct nxp_c45_sa_regs *sa_regs = sa->regs; 559 u32 cfg; 560 561 cfg = sa->an << MACSEC_RXSA_CS_AN_OFF; 562 cfg |= en ? MACSEC_RXSA_CS_EN : 0; 563 nxp_c45_macsec_write(phydev, sa_regs->cs, cfg); 564 } 565 566 static void nxp_c45_tx_sa_update(struct phy_device *phydev, 567 struct nxp_c45_sa *sa, bool en) 568 { 569 u32 cfg = 0; 570 571 nxp_c45_macsec_read(phydev, MACSEC_TXSC_CFG, &cfg); 572 573 cfg &= ~MACSEC_TXSC_CFG_AN_MASK; 574 cfg |= sa->an << MACSEC_TXSC_CFG_AN_OFF; 575 576 if (sa->is_key_a) 577 cfg &= ~MACSEC_TXSC_CFG_ASA; 578 else 579 cfg |= MACSEC_TXSC_CFG_ASA; 580 581 if (en) 582 cfg |= MACSEC_TXSC_CFG_SCE; 583 else 584 cfg &= ~MACSEC_TXSC_CFG_SCE; 585 586 nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg); 587 } 588 589 static void nxp_c45_set_sci(struct phy_device *phydev, u16 sci_base_addr, 590 sci_t sci) 591 { 592 u64 lsci = sci_to_cpu(sci); 593 594 nxp_c45_macsec_write(phydev, sci_base_addr, lsci >> 32); 595 nxp_c45_macsec_write(phydev, sci_base_addr + 4, lsci); 596 } 597 598 static bool nxp_c45_port_is_1(sci_t sci) 599 { 600 u16 port = sci_to_cpu(sci); 601 602 return port == 1; 603 } 604 605 static void nxp_c45_select_secy(struct phy_device *phydev, u8 id) 606 { 607 nxp_c45_macsec_write(phydev, MACSEC_RXSCA, id); 608 nxp_c45_macsec_write(phydev, MACSEC_RXSCKA, id); 609 nxp_c45_macsec_write(phydev, MACSEC_TXSCA, id); 610 nxp_c45_macsec_write(phydev, MACSEC_TXSCKA, id); 611 } 612 613 static bool nxp_c45_secy_valid(struct nxp_c45_secy *phy_secy, 614 bool can_rx_sc0_impl) 615 { 616 bool end_station = phy_secy->secy->tx_sc.end_station; 617 bool scb = phy_secy->secy->tx_sc.scb; 618 619 phy_secy->rx_sc0_impl = false; 620 621 if (end_station) { 622 if (!nxp_c45_port_is_1(phy_secy->secy->sci)) 623 return false; 624 if (!phy_secy->rx_sc) 625 return true; 626 return nxp_c45_port_is_1(phy_secy->rx_sc->sci); 627 } 628 629 if (scb) 630 return false; 631 632 if (!can_rx_sc0_impl) 633 return false; 634 635 if (phy_secy->secy_id != 0) 636 return false; 637 638 phy_secy->rx_sc0_impl = true; 639 640 return true; 641 } 642 643 static bool nxp_c45_rx_sc0_impl(struct nxp_c45_secy *phy_secy) 644 { 645 bool end_station = phy_secy->secy->tx_sc.end_station; 646 bool send_sci = phy_secy->secy->tx_sc.send_sci; 647 bool scb = phy_secy->secy->tx_sc.scb; 648 649 return !end_station && !send_sci && !scb; 650 } 651 652 static bool nxp_c45_mac_addr_free(struct macsec_context *ctx) 653 { 654 struct nxp_c45_phy *priv = ctx->phydev->priv; 655 struct nxp_c45_secy *pos, *tmp; 656 657 list_for_each_entry_safe(pos, tmp, &priv->macsec->secy_list, list) { 658 if (pos->secy == ctx->secy) 659 continue; 660 661 if (memcmp(pos->secy->netdev->dev_addr, 662 ctx->secy->netdev->dev_addr, ETH_ALEN) == 0) 663 return false; 664 } 665 666 return true; 667 } 668 669 static void nxp_c45_tx_sc_en_flt(struct phy_device *phydev, int secy_id, 670 bool en) 671 { 672 u32 tx_flt_base = TX_FLT_BASE(secy_id); 673 u32 reg = 0; 674 675 nxp_c45_macsec_read(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), ®); 676 if (en) 677 reg |= TX_SC_FLT_EN; 678 else 679 reg &= ~TX_SC_FLT_EN; 680 nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), reg); 681 } 682 683 static void nxp_c45_tx_sc_set_flt(struct phy_device *phydev, 684 struct nxp_c45_secy *phy_secy) 685 { 686 const u8 *dev_addr = phy_secy->secy->netdev->dev_addr; 687 u32 tx_flt_base = TX_FLT_BASE(phy_secy->secy_id); 688 u32 reg; 689 690 reg = dev_addr[0] << 8 | dev_addr[1]; 691 nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_DA_SA(tx_flt_base), reg); 692 reg = dev_addr[5] | dev_addr[4] << 8 | dev_addr[3] << 16 | 693 dev_addr[2] << 24; 694 695 nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_SA(tx_flt_base), reg); 696 nxp_c45_macsec_read(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), ®); 697 reg &= TX_SC_FLT_EN; 698 reg |= TX_SC_FLT_BY_SA | phy_secy->secy_id; 699 nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), reg); 700 } 701 702 static void nxp_c45_tx_sc_update(struct phy_device *phydev, 703 struct nxp_c45_secy *phy_secy) 704 { 705 u32 cfg = 0; 706 707 nxp_c45_macsec_read(phydev, MACSEC_TXSC_CFG, &cfg); 708 709 phydev_dbg(phydev, "XPN %s\n", phy_secy->secy->xpn ? "on" : "off"); 710 if (phy_secy->secy->xpn) 711 cfg |= MACSEC_TXSC_CFG_XPN; 712 else 713 cfg &= ~MACSEC_TXSC_CFG_XPN; 714 715 phydev_dbg(phydev, "key len %u\n", phy_secy->secy->key_len); 716 if (phy_secy->secy->key_len == 32) 717 cfg |= MACSEC_TXSC_CFG_AES_256; 718 else 719 cfg &= ~MACSEC_TXSC_CFG_AES_256; 720 721 phydev_dbg(phydev, "encryption %s\n", 722 phy_secy->secy->tx_sc.encrypt ? "on" : "off"); 723 if (phy_secy->secy->tx_sc.encrypt) 724 cfg |= MACSEC_TXSC_CFG_ENCRYPT; 725 else 726 cfg &= ~MACSEC_TXSC_CFG_ENCRYPT; 727 728 phydev_dbg(phydev, "protect frames %s\n", 729 phy_secy->secy->protect_frames ? "on" : "off"); 730 if (phy_secy->secy->protect_frames) 731 cfg |= MACSEC_TXSC_CFG_PROTECT; 732 else 733 cfg &= ~MACSEC_TXSC_CFG_PROTECT; 734 735 phydev_dbg(phydev, "send sci %s\n", 736 phy_secy->secy->tx_sc.send_sci ? "on" : "off"); 737 if (phy_secy->secy->tx_sc.send_sci) 738 cfg |= MACSEC_TXSC_CFG_SEND_SCI; 739 else 740 cfg &= ~MACSEC_TXSC_CFG_SEND_SCI; 741 742 phydev_dbg(phydev, "end station %s\n", 743 phy_secy->secy->tx_sc.end_station ? "on" : "off"); 744 if (phy_secy->secy->tx_sc.end_station) 745 cfg |= MACSEC_TXSC_CFG_END_STATION; 746 else 747 cfg &= ~MACSEC_TXSC_CFG_END_STATION; 748 749 phydev_dbg(phydev, "scb %s\n", 750 phy_secy->secy->tx_sc.scb ? "on" : "off"); 751 if (phy_secy->secy->tx_sc.scb) 752 cfg |= MACSEC_TXSC_CFG_SCB; 753 else 754 cfg &= ~MACSEC_TXSC_CFG_SCB; 755 756 nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg); 757 } 758 759 static void nxp_c45_tx_sc_clear_stats(struct phy_device *phydev, 760 struct nxp_c45_secy *phy_secy) 761 { 762 struct nxp_c45_sa *pos, *tmp; 763 764 list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) 765 if (pos->type == TX_SA) 766 nxp_c45_tx_sa_clear_stats(phydev, pos); 767 768 nxp_c45_macsec_write(phydev, MACSEC_OPUS, 0); 769 nxp_c45_macsec_write(phydev, MACSEC_OPTLS, 0); 770 nxp_c45_macsec_write(phydev, MACSEC_OOP1HS, 0); 771 nxp_c45_macsec_write(phydev, MACSEC_OOP2HS, 0); 772 nxp_c45_macsec_write(phydev, MACSEC_OOE1HS, 0); 773 nxp_c45_macsec_write(phydev, MACSEC_OOE2HS, 0); 774 } 775 776 static void nxp_c45_set_rx_sc0_impl(struct phy_device *phydev, 777 bool enable) 778 { 779 u32 reg = 0; 780 781 nxp_c45_macsec_read(phydev, MACSEC_CFG, ®); 782 if (enable) 783 reg |= MACSEC_CFG_S0I; 784 else 785 reg &= ~MACSEC_CFG_S0I; 786 nxp_c45_macsec_write(phydev, MACSEC_CFG, reg); 787 } 788 789 static bool nxp_c45_is_rx_sc0_impl(struct list_head *secy_list) 790 { 791 struct nxp_c45_secy *pos, *tmp; 792 793 list_for_each_entry_safe(pos, tmp, secy_list, list) 794 if (pos->rx_sc0_impl) 795 return pos->rx_sc0_impl; 796 797 return false; 798 } 799 800 static void nxp_c45_rx_sc_en(struct phy_device *phydev, 801 struct macsec_rx_sc *rx_sc, bool en) 802 { 803 u32 reg = 0; 804 805 nxp_c45_macsec_read(phydev, MACSEC_RXSC_CFG, ®); 806 if (rx_sc->active && en) 807 reg |= MACSEC_RXSC_CFG_SCI_EN; 808 else 809 reg &= ~MACSEC_RXSC_CFG_SCI_EN; 810 nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, reg); 811 } 812 813 static void nxp_c45_rx_sc_update(struct phy_device *phydev, 814 struct nxp_c45_secy *phy_secy) 815 { 816 struct macsec_rx_sc *rx_sc = phy_secy->rx_sc; 817 struct nxp_c45_phy *priv = phydev->priv; 818 u32 cfg = 0; 819 820 nxp_c45_macsec_read(phydev, MACSEC_RXSC_CFG, &cfg); 821 cfg &= ~MACSEC_RXSC_CFG_VF_MASK; 822 cfg = phy_secy->secy->validate_frames << MACSEC_RXSC_CFG_VF_OFF; 823 824 phydev_dbg(phydev, "validate frames %u\n", 825 phy_secy->secy->validate_frames); 826 phydev_dbg(phydev, "replay_protect %s window %u\n", 827 phy_secy->secy->replay_protect ? "on" : "off", 828 phy_secy->secy->replay_window); 829 if (phy_secy->secy->replay_protect) { 830 cfg |= MACSEC_RXSC_CFG_RP; 831 nxp_c45_macsec_write(phydev, MACSEC_RPW, 832 phy_secy->secy->replay_window); 833 } else { 834 cfg &= ~MACSEC_RXSC_CFG_RP; 835 } 836 837 phydev_dbg(phydev, "rx_sc->active %s\n", 838 rx_sc->active ? "on" : "off"); 839 if (rx_sc->active && 840 test_bit(phy_secy->secy_id, priv->macsec->secy_bitmap)) 841 cfg |= MACSEC_RXSC_CFG_SCI_EN; 842 else 843 cfg &= ~MACSEC_RXSC_CFG_SCI_EN; 844 845 phydev_dbg(phydev, "key len %u\n", phy_secy->secy->key_len); 846 if (phy_secy->secy->key_len == 32) 847 cfg |= MACSEC_RXSC_CFG_AES_256; 848 else 849 cfg &= ~MACSEC_RXSC_CFG_AES_256; 850 851 phydev_dbg(phydev, "XPN %s\n", phy_secy->secy->xpn ? "on" : "off"); 852 if (phy_secy->secy->xpn) 853 cfg |= MACSEC_RXSC_CFG_XPN; 854 else 855 cfg &= ~MACSEC_RXSC_CFG_XPN; 856 857 nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, cfg); 858 } 859 860 static void nxp_c45_rx_sc_clear_stats(struct phy_device *phydev, 861 struct nxp_c45_secy *phy_secy) 862 { 863 struct nxp_c45_sa *pos, *tmp; 864 int i; 865 866 list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) 867 if (pos->type == RX_SA) 868 nxp_c45_rx_sa_clear_stats(phydev, pos); 869 870 nxp_c45_macsec_write(phydev, MACSEC_INOD1HS, 0); 871 nxp_c45_macsec_write(phydev, MACSEC_INOD2HS, 0); 872 873 nxp_c45_macsec_write(phydev, MACSEC_INOV1HS, 0); 874 nxp_c45_macsec_write(phydev, MACSEC_INOV2HS, 0); 875 876 nxp_c45_macsec_write(phydev, MACSEC_RXSCIPDS, 0); 877 nxp_c45_macsec_write(phydev, MACSEC_RXSCIPLS, 0); 878 nxp_c45_macsec_write(phydev, MACSEC_RXSCIPUS, 0); 879 880 for (i = 0; i < MACSEC_NUM_AN; i++) { 881 nxp_c45_macsec_write(phydev, MACSEC_RXAN0INUSS + i * 4, 0); 882 nxp_c45_macsec_write(phydev, MACSEC_RXAN0IPUSS + i * 4, 0); 883 } 884 } 885 886 static void nxp_c45_rx_sc_del(struct phy_device *phydev, 887 struct nxp_c45_secy *phy_secy) 888 { 889 struct nxp_c45_sa *pos, *tmp; 890 891 nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, 0); 892 nxp_c45_macsec_write(phydev, MACSEC_RPW, 0); 893 nxp_c45_set_sci(phydev, MACSEC_RXSC_SCI_1H, 0); 894 895 nxp_c45_rx_sc_clear_stats(phydev, phy_secy); 896 897 list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) { 898 if (pos->type == RX_SA) { 899 nxp_c45_rx_sa_update(phydev, pos, false); 900 nxp_c45_sa_free(pos); 901 } 902 } 903 } 904 905 static void nxp_c45_clear_global_stats(struct phy_device *phydev) 906 { 907 nxp_c45_macsec_write(phydev, MACSEC_INPBTS, 0); 908 nxp_c45_macsec_write(phydev, MACSEC_INPWTS, 0); 909 nxp_c45_macsec_write(phydev, MACSEC_IPSNFS, 0); 910 } 911 912 static void nxp_c45_macsec_en(struct phy_device *phydev, bool en) 913 { 914 u32 reg; 915 916 nxp_c45_macsec_read(phydev, MACSEC_CFG, ®); 917 if (en) 918 reg |= MACSEC_CFG_BYPASS; 919 else 920 reg &= ~MACSEC_CFG_BYPASS; 921 nxp_c45_macsec_write(phydev, MACSEC_CFG, reg); 922 } 923 924 static int nxp_c45_mdo_dev_open(struct macsec_context *ctx) 925 { 926 struct phy_device *phydev = ctx->phydev; 927 struct nxp_c45_phy *priv = phydev->priv; 928 struct nxp_c45_secy *phy_secy; 929 930 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 931 if (IS_ERR(phy_secy)) 932 return PTR_ERR(phy_secy); 933 934 nxp_c45_select_secy(phydev, phy_secy->secy_id); 935 936 nxp_c45_tx_sc_en_flt(phydev, phy_secy->secy_id, true); 937 nxp_c45_set_rx_sc0_impl(phydev, phy_secy->rx_sc0_impl); 938 if (phy_secy->rx_sc) 939 nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, true); 940 941 if (bitmap_empty(priv->macsec->secy_bitmap, TX_SC_MAX)) 942 nxp_c45_macsec_en(phydev, true); 943 944 set_bit(phy_secy->secy_id, priv->macsec->secy_bitmap); 945 946 return 0; 947 } 948 949 static int nxp_c45_mdo_dev_stop(struct macsec_context *ctx) 950 { 951 struct phy_device *phydev = ctx->phydev; 952 struct nxp_c45_phy *priv = phydev->priv; 953 struct nxp_c45_secy *phy_secy; 954 955 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 956 if (IS_ERR(phy_secy)) 957 return PTR_ERR(phy_secy); 958 959 nxp_c45_select_secy(phydev, phy_secy->secy_id); 960 961 nxp_c45_tx_sc_en_flt(phydev, phy_secy->secy_id, false); 962 if (phy_secy->rx_sc) 963 nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, false); 964 nxp_c45_set_rx_sc0_impl(phydev, false); 965 966 clear_bit(phy_secy->secy_id, priv->macsec->secy_bitmap); 967 if (bitmap_empty(priv->macsec->secy_bitmap, TX_SC_MAX)) 968 nxp_c45_macsec_en(phydev, false); 969 970 return 0; 971 } 972 973 static int nxp_c45_mdo_add_secy(struct macsec_context *ctx) 974 { 975 struct phy_device *phydev = ctx->phydev; 976 struct nxp_c45_phy *priv = phydev->priv; 977 struct nxp_c45_secy *phy_secy; 978 bool can_rx_sc0_impl; 979 int idx; 980 981 phydev_dbg(phydev, "add SecY SCI %016llx\n", 982 sci_to_cpu(ctx->secy->sci)); 983 984 if (!nxp_c45_mac_addr_free(ctx)) 985 return -EBUSY; 986 987 if (nxp_c45_is_rx_sc0_impl(&priv->macsec->secy_list)) 988 return -EBUSY; 989 990 idx = find_first_zero_bit(priv->macsec->tx_sc_bitmap, TX_SC_MAX); 991 if (idx == TX_SC_MAX) 992 return -ENOSPC; 993 994 phy_secy = kzalloc(sizeof(*phy_secy), GFP_KERNEL); 995 if (!phy_secy) 996 return -ENOMEM; 997 998 INIT_LIST_HEAD(&phy_secy->sa_list); 999 phy_secy->secy = ctx->secy; 1000 phy_secy->secy_id = idx; 1001 1002 /* If the point to point mode should be enabled, we should have no 1003 * SecY added yet. 1004 */ 1005 can_rx_sc0_impl = list_count_nodes(&priv->macsec->secy_list) == 0; 1006 if (!nxp_c45_secy_valid(phy_secy, can_rx_sc0_impl)) { 1007 kfree(phy_secy); 1008 return -EINVAL; 1009 } 1010 1011 phy_secy->rx_sc0_impl = nxp_c45_rx_sc0_impl(phy_secy); 1012 1013 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1014 nxp_c45_set_sci(phydev, MACSEC_TXSC_SCI_1H, ctx->secy->sci); 1015 nxp_c45_tx_sc_set_flt(phydev, phy_secy); 1016 nxp_c45_tx_sc_update(phydev, phy_secy); 1017 if (phy_interrupt_is_valid(phydev)) 1018 nxp_c45_secy_irq_en(phydev, phy_secy, true); 1019 1020 set_bit(idx, priv->macsec->tx_sc_bitmap); 1021 list_add_tail(&phy_secy->list, &priv->macsec->secy_list); 1022 1023 return 0; 1024 } 1025 1026 static void nxp_c45_tx_sa_next(struct nxp_c45_secy *phy_secy, 1027 struct nxp_c45_sa *next_sa, u8 encoding_sa) 1028 { 1029 struct nxp_c45_sa *sa; 1030 1031 sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, encoding_sa); 1032 if (!IS_ERR(sa)) { 1033 memcpy(next_sa, sa, sizeof(*sa)); 1034 } else { 1035 next_sa->is_key_a = true; 1036 next_sa->an = encoding_sa; 1037 } 1038 } 1039 1040 static int nxp_c45_mdo_upd_secy(struct macsec_context *ctx) 1041 { 1042 u8 encoding_sa = ctx->secy->tx_sc.encoding_sa; 1043 struct phy_device *phydev = ctx->phydev; 1044 struct nxp_c45_phy *priv = phydev->priv; 1045 struct nxp_c45_secy *phy_secy; 1046 struct nxp_c45_sa next_sa; 1047 bool can_rx_sc0_impl; 1048 1049 phydev_dbg(phydev, "update SecY SCI %016llx\n", 1050 sci_to_cpu(ctx->secy->sci)); 1051 1052 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1053 if (IS_ERR(phy_secy)) 1054 return PTR_ERR(phy_secy); 1055 1056 if (!nxp_c45_mac_addr_free(ctx)) 1057 return -EBUSY; 1058 1059 /* If the point to point mode should be enabled, we should have only 1060 * one SecY added, respectively the updated one. 1061 */ 1062 can_rx_sc0_impl = list_count_nodes(&priv->macsec->secy_list) == 1; 1063 if (!nxp_c45_secy_valid(phy_secy, can_rx_sc0_impl)) 1064 return -EINVAL; 1065 phy_secy->rx_sc0_impl = nxp_c45_rx_sc0_impl(phy_secy); 1066 1067 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1068 nxp_c45_tx_sc_set_flt(phydev, phy_secy); 1069 nxp_c45_tx_sc_update(phydev, phy_secy); 1070 nxp_c45_tx_sa_next(phy_secy, &next_sa, encoding_sa); 1071 nxp_c45_tx_sa_update(phydev, &next_sa, ctx->secy->operational); 1072 1073 nxp_c45_set_rx_sc0_impl(phydev, phy_secy->rx_sc0_impl); 1074 if (phy_secy->rx_sc) 1075 nxp_c45_rx_sc_update(phydev, phy_secy); 1076 1077 return 0; 1078 } 1079 1080 static int nxp_c45_mdo_del_secy(struct macsec_context *ctx) 1081 { 1082 u8 encoding_sa = ctx->secy->tx_sc.encoding_sa; 1083 struct phy_device *phydev = ctx->phydev; 1084 struct nxp_c45_phy *priv = phydev->priv; 1085 struct nxp_c45_secy *phy_secy; 1086 struct nxp_c45_sa next_sa; 1087 1088 phydev_dbg(phydev, "delete SecY SCI %016llx\n", 1089 sci_to_cpu(ctx->secy->sci)); 1090 1091 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1092 if (IS_ERR(phy_secy)) 1093 return PTR_ERR(phy_secy); 1094 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1095 1096 nxp_c45_mdo_dev_stop(ctx); 1097 nxp_c45_tx_sa_next(phy_secy, &next_sa, encoding_sa); 1098 nxp_c45_tx_sa_update(phydev, &next_sa, false); 1099 nxp_c45_tx_sc_clear_stats(phydev, phy_secy); 1100 if (phy_secy->rx_sc) 1101 nxp_c45_rx_sc_del(phydev, phy_secy); 1102 1103 nxp_c45_sa_list_free(&phy_secy->sa_list); 1104 if (phy_interrupt_is_valid(phydev)) 1105 nxp_c45_secy_irq_en(phydev, phy_secy, false); 1106 1107 clear_bit(phy_secy->secy_id, priv->macsec->tx_sc_bitmap); 1108 nxp_c45_secy_free(phy_secy); 1109 1110 if (list_empty(&priv->macsec->secy_list)) 1111 nxp_c45_clear_global_stats(phydev); 1112 1113 return 0; 1114 } 1115 1116 static int nxp_c45_mdo_add_rxsc(struct macsec_context *ctx) 1117 { 1118 struct phy_device *phydev = ctx->phydev; 1119 struct nxp_c45_phy *priv = phydev->priv; 1120 struct nxp_c45_secy *phy_secy; 1121 1122 phydev_dbg(phydev, "add RX SC SCI %016llx %s\n", 1123 sci_to_cpu(ctx->rx_sc->sci), 1124 ctx->rx_sc->active ? "enabled" : "disabled"); 1125 1126 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1127 if (IS_ERR(phy_secy)) 1128 return PTR_ERR(phy_secy); 1129 1130 if (phy_secy->rx_sc) 1131 return -ENOSPC; 1132 1133 if (phy_secy->secy->tx_sc.end_station && 1134 !nxp_c45_port_is_1(ctx->rx_sc->sci)) 1135 return -EINVAL; 1136 1137 phy_secy->rx_sc = ctx->rx_sc; 1138 1139 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1140 nxp_c45_set_sci(phydev, MACSEC_RXSC_SCI_1H, ctx->rx_sc->sci); 1141 nxp_c45_rx_sc_update(phydev, phy_secy); 1142 1143 return 0; 1144 } 1145 1146 static int nxp_c45_mdo_upd_rxsc(struct macsec_context *ctx) 1147 { 1148 struct phy_device *phydev = ctx->phydev; 1149 struct nxp_c45_phy *priv = phydev->priv; 1150 struct nxp_c45_secy *phy_secy; 1151 1152 phydev_dbg(phydev, "update RX SC SCI %016llx %s\n", 1153 sci_to_cpu(ctx->rx_sc->sci), 1154 ctx->rx_sc->active ? "enabled" : "disabled"); 1155 1156 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1157 if (IS_ERR(phy_secy)) 1158 return PTR_ERR(phy_secy); 1159 1160 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1161 nxp_c45_rx_sc_update(phydev, phy_secy); 1162 1163 return 0; 1164 } 1165 1166 static int nxp_c45_mdo_del_rxsc(struct macsec_context *ctx) 1167 { 1168 struct phy_device *phydev = ctx->phydev; 1169 struct nxp_c45_phy *priv = phydev->priv; 1170 struct nxp_c45_secy *phy_secy; 1171 1172 phydev_dbg(phydev, "delete RX SC SCI %016llx %s\n", 1173 sci_to_cpu(ctx->rx_sc->sci), 1174 ctx->rx_sc->active ? "enabled" : "disabled"); 1175 1176 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1177 if (IS_ERR(phy_secy)) 1178 return PTR_ERR(phy_secy); 1179 1180 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1181 nxp_c45_rx_sc_del(phydev, phy_secy); 1182 phy_secy->rx_sc = NULL; 1183 1184 return 0; 1185 } 1186 1187 static int nxp_c45_mdo_add_rxsa(struct macsec_context *ctx) 1188 { 1189 struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa; 1190 struct phy_device *phydev = ctx->phydev; 1191 struct nxp_c45_phy *priv = phydev->priv; 1192 struct nxp_c45_secy *phy_secy; 1193 u8 an = ctx->sa.assoc_num; 1194 struct nxp_c45_sa *sa; 1195 1196 phydev_dbg(phydev, "add RX SA %u %s to RX SC SCI %016llx\n", 1197 an, rx_sa->active ? "enabled" : "disabled", 1198 sci_to_cpu(rx_sa->sc->sci)); 1199 1200 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1201 if (IS_ERR(phy_secy)) 1202 return PTR_ERR(phy_secy); 1203 1204 sa = nxp_c45_sa_alloc(&phy_secy->sa_list, rx_sa, RX_SA, an); 1205 if (IS_ERR(sa)) 1206 return PTR_ERR(sa); 1207 1208 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1209 nxp_c45_sa_set_pn(phydev, sa, rx_sa->next_pn, 1210 ctx->secy->replay_window); 1211 nxp_c45_sa_set_key(ctx, sa->regs, rx_sa->key.salt.bytes, rx_sa->ssci); 1212 nxp_c45_rx_sa_update(phydev, sa, rx_sa->active); 1213 1214 return 0; 1215 } 1216 1217 static int nxp_c45_mdo_upd_rxsa(struct macsec_context *ctx) 1218 { 1219 struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa; 1220 struct phy_device *phydev = ctx->phydev; 1221 struct nxp_c45_phy *priv = phydev->priv; 1222 struct nxp_c45_secy *phy_secy; 1223 u8 an = ctx->sa.assoc_num; 1224 struct nxp_c45_sa *sa; 1225 1226 phydev_dbg(phydev, "update RX SA %u %s to RX SC SCI %016llx\n", 1227 an, rx_sa->active ? "enabled" : "disabled", 1228 sci_to_cpu(rx_sa->sc->sci)); 1229 1230 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1231 if (IS_ERR(phy_secy)) 1232 return PTR_ERR(phy_secy); 1233 1234 sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an); 1235 if (IS_ERR(sa)) 1236 return PTR_ERR(sa); 1237 1238 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1239 if (ctx->sa.update_pn) 1240 nxp_c45_sa_set_pn(phydev, sa, rx_sa->next_pn, 1241 ctx->secy->replay_window); 1242 nxp_c45_rx_sa_update(phydev, sa, rx_sa->active); 1243 1244 return 0; 1245 } 1246 1247 static int nxp_c45_mdo_del_rxsa(struct macsec_context *ctx) 1248 { 1249 struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa; 1250 struct phy_device *phydev = ctx->phydev; 1251 struct nxp_c45_phy *priv = phydev->priv; 1252 struct nxp_c45_secy *phy_secy; 1253 u8 an = ctx->sa.assoc_num; 1254 struct nxp_c45_sa *sa; 1255 1256 phydev_dbg(phydev, "delete RX SA %u %s to RX SC SCI %016llx\n", 1257 an, rx_sa->active ? "enabled" : "disabled", 1258 sci_to_cpu(rx_sa->sc->sci)); 1259 1260 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1261 if (IS_ERR(phy_secy)) 1262 return PTR_ERR(phy_secy); 1263 1264 sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an); 1265 if (IS_ERR(sa)) 1266 return PTR_ERR(sa); 1267 1268 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1269 nxp_c45_rx_sa_update(phydev, sa, false); 1270 nxp_c45_rx_sa_clear_stats(phydev, sa); 1271 1272 nxp_c45_sa_free(sa); 1273 1274 return 0; 1275 } 1276 1277 static int nxp_c45_mdo_add_txsa(struct macsec_context *ctx) 1278 { 1279 struct macsec_tx_sa *tx_sa = ctx->sa.tx_sa; 1280 struct phy_device *phydev = ctx->phydev; 1281 struct nxp_c45_phy *priv = phydev->priv; 1282 struct nxp_c45_secy *phy_secy; 1283 u8 an = ctx->sa.assoc_num; 1284 struct nxp_c45_sa *sa; 1285 1286 phydev_dbg(phydev, "add TX SA %u %s to TX SC %016llx\n", 1287 an, ctx->sa.tx_sa->active ? "enabled" : "disabled", 1288 sci_to_cpu(ctx->secy->sci)); 1289 1290 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1291 if (IS_ERR(phy_secy)) 1292 return PTR_ERR(phy_secy); 1293 1294 sa = nxp_c45_sa_alloc(&phy_secy->sa_list, tx_sa, TX_SA, an); 1295 if (IS_ERR(sa)) 1296 return PTR_ERR(sa); 1297 1298 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1299 nxp_c45_sa_set_pn(phydev, sa, tx_sa->next_pn, 0); 1300 nxp_c45_sa_set_key(ctx, sa->regs, tx_sa->key.salt.bytes, tx_sa->ssci); 1301 if (ctx->secy->tx_sc.encoding_sa == sa->an) 1302 nxp_c45_tx_sa_update(phydev, sa, tx_sa->active); 1303 1304 return 0; 1305 } 1306 1307 static int nxp_c45_mdo_upd_txsa(struct macsec_context *ctx) 1308 { 1309 struct macsec_tx_sa *tx_sa = ctx->sa.tx_sa; 1310 struct phy_device *phydev = ctx->phydev; 1311 struct nxp_c45_phy *priv = phydev->priv; 1312 struct nxp_c45_secy *phy_secy; 1313 u8 an = ctx->sa.assoc_num; 1314 struct nxp_c45_sa *sa; 1315 1316 phydev_dbg(phydev, "update TX SA %u %s to TX SC %016llx\n", 1317 an, ctx->sa.tx_sa->active ? "enabled" : "disabled", 1318 sci_to_cpu(ctx->secy->sci)); 1319 1320 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1321 if (IS_ERR(phy_secy)) 1322 return PTR_ERR(phy_secy); 1323 1324 sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an); 1325 if (IS_ERR(sa)) 1326 return PTR_ERR(sa); 1327 1328 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1329 if (ctx->sa.update_pn) 1330 nxp_c45_sa_set_pn(phydev, sa, tx_sa->next_pn, 0); 1331 if (ctx->secy->tx_sc.encoding_sa == sa->an) 1332 nxp_c45_tx_sa_update(phydev, sa, tx_sa->active); 1333 1334 return 0; 1335 } 1336 1337 static int nxp_c45_mdo_del_txsa(struct macsec_context *ctx) 1338 { 1339 struct phy_device *phydev = ctx->phydev; 1340 struct nxp_c45_phy *priv = phydev->priv; 1341 struct nxp_c45_secy *phy_secy; 1342 u8 an = ctx->sa.assoc_num; 1343 struct nxp_c45_sa *sa; 1344 1345 phydev_dbg(phydev, "delete TX SA %u %s to TX SC %016llx\n", 1346 an, ctx->sa.tx_sa->active ? "enabled" : "disabled", 1347 sci_to_cpu(ctx->secy->sci)); 1348 1349 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1350 if (IS_ERR(phy_secy)) 1351 return PTR_ERR(phy_secy); 1352 1353 sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an); 1354 if (IS_ERR(sa)) 1355 return PTR_ERR(sa); 1356 1357 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1358 if (ctx->secy->tx_sc.encoding_sa == sa->an) 1359 nxp_c45_tx_sa_update(phydev, sa, false); 1360 nxp_c45_tx_sa_clear_stats(phydev, sa); 1361 1362 nxp_c45_sa_free(sa); 1363 1364 return 0; 1365 } 1366 1367 static int nxp_c45_mdo_get_dev_stats(struct macsec_context *ctx) 1368 { 1369 struct phy_device *phydev = ctx->phydev; 1370 struct nxp_c45_phy *priv = phydev->priv; 1371 struct macsec_dev_stats *dev_stats; 1372 struct nxp_c45_secy *phy_secy; 1373 1374 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1375 if (IS_ERR(phy_secy)) 1376 return PTR_ERR(phy_secy); 1377 1378 dev_stats = ctx->stats.dev_stats; 1379 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1380 1381 nxp_c45_macsec_read32_64(phydev, MACSEC_OPUS, 1382 &dev_stats->OutPktsUntagged); 1383 nxp_c45_macsec_read32_64(phydev, MACSEC_OPTLS, 1384 &dev_stats->OutPktsTooLong); 1385 nxp_c45_macsec_read32_64(phydev, MACSEC_INPBTS, 1386 &dev_stats->InPktsBadTag); 1387 1388 if (phy_secy->secy->validate_frames == MACSEC_VALIDATE_STRICT) 1389 nxp_c45_macsec_read32_64(phydev, MACSEC_INPWTS, 1390 &dev_stats->InPktsNoTag); 1391 else 1392 nxp_c45_macsec_read32_64(phydev, MACSEC_INPWTS, 1393 &dev_stats->InPktsUntagged); 1394 1395 if (phy_secy->secy->validate_frames == MACSEC_VALIDATE_STRICT) 1396 nxp_c45_macsec_read32_64(phydev, MACSEC_IPSNFS, 1397 &dev_stats->InPktsNoSCI); 1398 else 1399 nxp_c45_macsec_read32_64(phydev, MACSEC_IPSNFS, 1400 &dev_stats->InPktsUnknownSCI); 1401 1402 /* Always 0. */ 1403 dev_stats->InPktsOverrun = 0; 1404 1405 return 0; 1406 } 1407 1408 static int nxp_c45_mdo_get_tx_sc_stats(struct macsec_context *ctx) 1409 { 1410 struct phy_device *phydev = ctx->phydev; 1411 struct nxp_c45_phy *priv = phydev->priv; 1412 struct macsec_tx_sa_stats tx_sa_stats; 1413 struct macsec_tx_sc_stats *stats; 1414 struct nxp_c45_secy *phy_secy; 1415 struct nxp_c45_sa *pos, *tmp; 1416 1417 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1418 if (IS_ERR(phy_secy)) 1419 return PTR_ERR(phy_secy); 1420 1421 stats = ctx->stats.tx_sc_stats; 1422 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1423 1424 nxp_c45_macsec_read64(phydev, MACSEC_OOE1HS, 1425 &stats->OutOctetsEncrypted); 1426 nxp_c45_macsec_read64(phydev, MACSEC_OOP1HS, 1427 &stats->OutOctetsProtected); 1428 list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) { 1429 if (pos->type != TX_SA) 1430 continue; 1431 1432 memset(&tx_sa_stats, 0, sizeof(tx_sa_stats)); 1433 nxp_c45_tx_sa_read_stats(phydev, pos, &tx_sa_stats); 1434 1435 stats->OutPktsEncrypted += tx_sa_stats.OutPktsEncrypted; 1436 stats->OutPktsProtected += tx_sa_stats.OutPktsProtected; 1437 } 1438 1439 return 0; 1440 } 1441 1442 static int nxp_c45_mdo_get_tx_sa_stats(struct macsec_context *ctx) 1443 { 1444 struct phy_device *phydev = ctx->phydev; 1445 struct nxp_c45_phy *priv = phydev->priv; 1446 struct macsec_tx_sa_stats *stats; 1447 struct nxp_c45_secy *phy_secy; 1448 u8 an = ctx->sa.assoc_num; 1449 struct nxp_c45_sa *sa; 1450 1451 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1452 if (IS_ERR(phy_secy)) 1453 return PTR_ERR(phy_secy); 1454 1455 sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an); 1456 if (IS_ERR(sa)) 1457 return PTR_ERR(sa); 1458 1459 stats = ctx->stats.tx_sa_stats; 1460 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1461 nxp_c45_tx_sa_read_stats(phydev, sa, stats); 1462 1463 return 0; 1464 } 1465 1466 static int nxp_c45_mdo_get_rx_sc_stats(struct macsec_context *ctx) 1467 { 1468 struct phy_device *phydev = ctx->phydev; 1469 struct nxp_c45_phy *priv = phydev->priv; 1470 struct macsec_rx_sa_stats rx_sa_stats; 1471 struct macsec_rx_sc_stats *stats; 1472 struct nxp_c45_secy *phy_secy; 1473 struct nxp_c45_sa *pos, *tmp; 1474 u32 reg = 0; 1475 int i; 1476 1477 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1478 if (IS_ERR(phy_secy)) 1479 return PTR_ERR(phy_secy); 1480 1481 if (phy_secy->rx_sc != ctx->rx_sc) 1482 return -EINVAL; 1483 1484 stats = ctx->stats.rx_sc_stats; 1485 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1486 1487 list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) { 1488 if (pos->type != RX_SA) 1489 continue; 1490 1491 memset(&rx_sa_stats, 0, sizeof(rx_sa_stats)); 1492 nxp_c45_rx_sa_read_stats(phydev, pos, &rx_sa_stats); 1493 1494 stats->InPktsInvalid += rx_sa_stats.InPktsInvalid; 1495 stats->InPktsNotValid += rx_sa_stats.InPktsNotValid; 1496 stats->InPktsOK += rx_sa_stats.InPktsOK; 1497 } 1498 1499 for (i = 0; i < MACSEC_NUM_AN; i++) { 1500 nxp_c45_macsec_read(phydev, MACSEC_RXAN0INUSS + i * 4, ®); 1501 stats->InPktsNotUsingSA += reg; 1502 nxp_c45_macsec_read(phydev, MACSEC_RXAN0IPUSS + i * 4, ®); 1503 stats->InPktsUnusedSA += reg; 1504 } 1505 1506 nxp_c45_macsec_read64(phydev, MACSEC_INOD1HS, 1507 &stats->InOctetsDecrypted); 1508 nxp_c45_macsec_read64(phydev, MACSEC_INOV1HS, 1509 &stats->InOctetsValidated); 1510 1511 nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPDS, 1512 &stats->InPktsDelayed); 1513 nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPLS, 1514 &stats->InPktsLate); 1515 nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPUS, 1516 &stats->InPktsUnchecked); 1517 1518 return 0; 1519 } 1520 1521 static int nxp_c45_mdo_get_rx_sa_stats(struct macsec_context *ctx) 1522 { 1523 struct phy_device *phydev = ctx->phydev; 1524 struct nxp_c45_phy *priv = phydev->priv; 1525 struct macsec_rx_sa_stats *stats; 1526 struct nxp_c45_secy *phy_secy; 1527 u8 an = ctx->sa.assoc_num; 1528 struct nxp_c45_sa *sa; 1529 1530 phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci); 1531 if (IS_ERR(phy_secy)) 1532 return PTR_ERR(phy_secy); 1533 1534 sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an); 1535 if (IS_ERR(sa)) 1536 return PTR_ERR(sa); 1537 1538 stats = ctx->stats.rx_sa_stats; 1539 nxp_c45_select_secy(phydev, phy_secy->secy_id); 1540 1541 nxp_c45_rx_sa_read_stats(phydev, sa, stats); 1542 nxp_c45_macsec_read(phydev, MACSEC_RXAN0INUSS + an * 4, 1543 &stats->InPktsNotUsingSA); 1544 nxp_c45_macsec_read(phydev, MACSEC_RXAN0IPUSS + an * 4, 1545 &stats->InPktsUnusedSA); 1546 1547 return 0; 1548 } 1549 1550 struct tja11xx_tlv_header { 1551 struct ethhdr eth; 1552 u8 subtype; 1553 u8 len; 1554 u8 payload[28]; 1555 }; 1556 1557 static int nxp_c45_mdo_insert_tx_tag(struct phy_device *phydev, 1558 struct sk_buff *skb) 1559 { 1560 struct tja11xx_tlv_header *tlv; 1561 struct ethhdr *eth; 1562 1563 eth = eth_hdr(skb); 1564 tlv = skb_push(skb, TJA11XX_TLV_TX_NEEDED_HEADROOM); 1565 memmove(tlv, eth, sizeof(*eth)); 1566 skb_reset_mac_header(skb); 1567 tlv->eth.h_proto = htons(ETH_P_TJA11XX_TLV); 1568 tlv->subtype = 1; 1569 tlv->len = sizeof(tlv->payload); 1570 memset(tlv->payload, 0, sizeof(tlv->payload)); 1571 1572 return 0; 1573 } 1574 1575 static const struct macsec_ops nxp_c45_macsec_ops = { 1576 .mdo_dev_open = nxp_c45_mdo_dev_open, 1577 .mdo_dev_stop = nxp_c45_mdo_dev_stop, 1578 .mdo_add_secy = nxp_c45_mdo_add_secy, 1579 .mdo_upd_secy = nxp_c45_mdo_upd_secy, 1580 .mdo_del_secy = nxp_c45_mdo_del_secy, 1581 .mdo_add_rxsc = nxp_c45_mdo_add_rxsc, 1582 .mdo_upd_rxsc = nxp_c45_mdo_upd_rxsc, 1583 .mdo_del_rxsc = nxp_c45_mdo_del_rxsc, 1584 .mdo_add_rxsa = nxp_c45_mdo_add_rxsa, 1585 .mdo_upd_rxsa = nxp_c45_mdo_upd_rxsa, 1586 .mdo_del_rxsa = nxp_c45_mdo_del_rxsa, 1587 .mdo_add_txsa = nxp_c45_mdo_add_txsa, 1588 .mdo_upd_txsa = nxp_c45_mdo_upd_txsa, 1589 .mdo_del_txsa = nxp_c45_mdo_del_txsa, 1590 .mdo_get_dev_stats = nxp_c45_mdo_get_dev_stats, 1591 .mdo_get_tx_sc_stats = nxp_c45_mdo_get_tx_sc_stats, 1592 .mdo_get_tx_sa_stats = nxp_c45_mdo_get_tx_sa_stats, 1593 .mdo_get_rx_sc_stats = nxp_c45_mdo_get_rx_sc_stats, 1594 .mdo_get_rx_sa_stats = nxp_c45_mdo_get_rx_sa_stats, 1595 .mdo_insert_tx_tag = nxp_c45_mdo_insert_tx_tag, 1596 .needed_headroom = TJA11XX_TLV_TX_NEEDED_HEADROOM, 1597 .needed_tailroom = TJA11XX_TLV_NEEDED_TAILROOM, 1598 }; 1599 1600 int nxp_c45_macsec_config_init(struct phy_device *phydev) 1601 { 1602 struct nxp_c45_phy *priv = phydev->priv; 1603 int ret; 1604 1605 if (!priv->macsec) 1606 return 0; 1607 1608 ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES, 1609 MACSEC_EN | ADAPTER_EN); 1610 if (ret) 1611 return ret; 1612 1613 ret = nxp_c45_macsec_write(phydev, ADPTR_CNTRL, ADPTR_CNTRL_CONFIG_EN | 1614 ADPTR_CNTRL_ADPTR_EN); 1615 if (ret) 1616 return ret; 1617 1618 ret = nxp_c45_macsec_write(phydev, ADPTR_TX_TAG_CNTRL, 1619 ADPTR_TX_TAG_CNTRL_ENA); 1620 if (ret) 1621 return ret; 1622 1623 ret = nxp_c45_macsec_write(phydev, ADPTR_CNTRL, ADPTR_CNTRL_ADPTR_EN); 1624 if (ret) 1625 return ret; 1626 1627 ret = nxp_c45_macsec_write(phydev, MACSEC_TPNET, PN_WRAP_THRESHOLD); 1628 if (ret) 1629 return ret; 1630 1631 /* Set MKA filter. */ 1632 ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0D2, ETH_P_PAE); 1633 if (ret) 1634 return ret; 1635 1636 ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0M1, MACSEC_OVP); 1637 if (ret) 1638 return ret; 1639 1640 ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0M2, ETYPE_MASK); 1641 if (ret) 1642 return ret; 1643 1644 ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0R, MACSEC_UPFR_EN); 1645 1646 return ret; 1647 } 1648 1649 int nxp_c45_macsec_probe(struct phy_device *phydev) 1650 { 1651 struct nxp_c45_phy *priv = phydev->priv; 1652 struct device *dev = &phydev->mdio.dev; 1653 1654 priv->macsec = devm_kzalloc(dev, sizeof(*priv->macsec), GFP_KERNEL); 1655 if (!priv->macsec) 1656 return -ENOMEM; 1657 1658 INIT_LIST_HEAD(&priv->macsec->secy_list); 1659 phydev->macsec_ops = &nxp_c45_macsec_ops; 1660 1661 return 0; 1662 } 1663 1664 void nxp_c45_macsec_remove(struct phy_device *phydev) 1665 { 1666 struct nxp_c45_phy *priv = phydev->priv; 1667 struct nxp_c45_secy *secy_p, *secy_t; 1668 struct nxp_c45_sa *sa_p, *sa_t; 1669 struct list_head *secy_list; 1670 1671 if (!priv->macsec) 1672 return; 1673 1674 secy_list = &priv->macsec->secy_list; 1675 nxp_c45_macsec_en(phydev, false); 1676 1677 list_for_each_entry_safe(secy_p, secy_t, secy_list, list) { 1678 list_for_each_entry_safe(sa_p, sa_t, &secy_p->sa_list, list) 1679 nxp_c45_sa_free(sa_p); 1680 nxp_c45_secy_free(secy_p); 1681 } 1682 } 1683 1684 void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev, 1685 irqreturn_t *ret) 1686 { 1687 struct nxp_c45_phy *priv = phydev->priv; 1688 struct nxp_c45_secy *secy; 1689 struct nxp_c45_sa *sa; 1690 u8 encoding_sa; 1691 int secy_id; 1692 u32 reg = 0; 1693 1694 if (!priv->macsec) 1695 return; 1696 1697 do { 1698 nxp_c45_macsec_read(phydev, MACSEC_EVR, ®); 1699 if (!reg) 1700 return; 1701 1702 secy_id = MACSEC_REG_SIZE - ffs(reg); 1703 secy = nxp_c45_find_secy_by_id(&priv->macsec->secy_list, 1704 secy_id); 1705 if (IS_ERR(secy)) { 1706 WARN_ON(1); 1707 goto macsec_ack_irq; 1708 } 1709 1710 encoding_sa = secy->secy->tx_sc.encoding_sa; 1711 phydev_dbg(phydev, "pn_wrapped: TX SC %d, encoding_sa %u\n", 1712 secy->secy_id, encoding_sa); 1713 1714 sa = nxp_c45_find_sa(&secy->sa_list, TX_SA, encoding_sa); 1715 if (!IS_ERR(sa)) 1716 macsec_pn_wrapped(secy->secy, sa->sa); 1717 else 1718 WARN_ON(1); 1719 1720 macsec_ack_irq: 1721 nxp_c45_macsec_write(phydev, MACSEC_EVR, 1722 TX_SC_BIT(secy_id)); 1723 *ret = IRQ_HANDLED; 1724 } while (reg); 1725 } 1726