xref: /linux/drivers/net/phy/nxp-c45-tja11xx-macsec.c (revision 06d07429858317ded2db7986113a9e0129cd599b)
1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PTP PHY driver interface
3  * Copyright 2023 NXP
4  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/ethtool_netlink.h>
9 #include <linux/kernel.h>
10 #include <linux/mii.h>
11 #include <linux/module.h>
12 #include <linux/phy.h>
13 #include <linux/processor.h>
14 #include <net/dst_metadata.h>
15 #include <net/macsec.h>
16 
17 #include "nxp-c45-tja11xx.h"
18 
19 #define MACSEC_REG_SIZE			32
20 #define TX_SC_MAX			4
21 
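/* SecY slots are indexed from the most significant bit of the 32-bit event
 * registers down: SecY 0 maps to bit 31, SecY 1 to bit 30, and so on.
 */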
22 #define TX_SC_BIT(secy_id)		BIT(MACSEC_REG_SIZE - (secy_id) - 1)
23 
24 #define VEND1_MACSEC_BASE		0x9000
25 
26 #define MACSEC_CFG			0x0000
27 #define MACSEC_CFG_BYPASS		BIT(1)
28 #define MACSEC_CFG_S0I			BIT(0)
29 
30 #define MACSEC_TPNET			0x0044
31 #define PN_WRAP_THRESHOLD		0xffffffff
32 
33 #define MACSEC_RXSCA			0x0080
34 #define MACSEC_RXSCKA			0x0084
35 
36 #define MACSEC_TXSCA			0x00C0
37 #define MACSEC_TXSCKA			0x00C4
38 
39 #define MACSEC_RXSC_SCI_1H		0x0100
40 
41 #define MACSEC_RXSC_CFG			0x0128
42 #define MACSEC_RXSC_CFG_XPN		BIT(25)
43 #define MACSEC_RXSC_CFG_AES_256		BIT(24)
44 #define MACSEC_RXSC_CFG_SCI_EN		BIT(11)
45 #define MACSEC_RXSC_CFG_RP		BIT(10)
46 #define MACSEC_RXSC_CFG_VF_MASK		GENMASK(9, 8)
47 #define MACSEC_RXSC_CFG_VF_OFF		8
48 
49 #define MACSEC_RPW			0x012C
50 
51 #define MACSEC_RXSA_A_CS		0x0180
52 #define MACSEC_RXSA_A_NPN		0x0184
53 #define MACSEC_RXSA_A_XNPN		0x0188
54 #define MACSEC_RXSA_A_LNPN		0x018C
55 #define MACSEC_RXSA_A_LXNPN		0x0190
56 
57 #define MACSEC_RXSA_B_CS		0x01C0
58 #define MACSEC_RXSA_B_NPN		0x01C4
59 #define MACSEC_RXSA_B_XNPN		0x01C8
60 #define MACSEC_RXSA_B_LNPN		0x01CC
61 #define MACSEC_RXSA_B_LXNPN		0x01D0
62 
63 #define MACSEC_RXSA_CS_AN_OFF		1
64 #define MACSEC_RXSA_CS_EN		BIT(0)
65 
66 #define MACSEC_TXSC_SCI_1H		0x0200
67 #define MACSEC_TXSC_CFG			0x0228
68 #define MACSEC_TXSC_CFG_XPN		BIT(25)
69 #define MACSEC_TXSC_CFG_AES_256		BIT(24)
70 #define MACSEC_TXSC_CFG_AN_MASK		GENMASK(19, 18)
71 #define MACSEC_TXSC_CFG_AN_OFF		18
72 #define MACSEC_TXSC_CFG_ASA		BIT(17)
73 #define MACSEC_TXSC_CFG_SCE		BIT(16)
74 #define MACSEC_TXSC_CFG_ENCRYPT		BIT(4)
75 #define MACSEC_TXSC_CFG_PROTECT		BIT(3)
76 #define MACSEC_TXSC_CFG_SEND_SCI	BIT(2)
77 #define MACSEC_TXSC_CFG_END_STATION	BIT(1)
78 #define MACSEC_TXSC_CFG_SCB		BIT(0)
79 
80 #define MACSEC_TXSA_A_CS		0x0280
81 #define MACSEC_TXSA_A_NPN		0x0284
82 #define MACSEC_TXSA_A_XNPN		0x0288
83 
84 #define MACSEC_TXSA_B_CS		0x02C0
85 #define MACSEC_TXSA_B_NPN		0x02C4
86 #define MACSEC_TXSA_B_XNPN		0x02C8
87 
88 #define MACSEC_SA_CS_A			BIT(31)
89 
90 #define MACSEC_EVR			0x0400
91 #define MACSEC_EVER			0x0404
92 
93 #define MACSEC_RXSA_A_KA		0x0700
94 #define MACSEC_RXSA_A_SSCI		0x0720
95 #define MACSEC_RXSA_A_SALT		0x0724
96 
97 #define MACSEC_RXSA_B_KA		0x0740
98 #define MACSEC_RXSA_B_SSCI		0x0760
99 #define MACSEC_RXSA_B_SALT		0x0764
100 
101 #define MACSEC_TXSA_A_KA		0x0780
102 #define MACSEC_TXSA_A_SSCI		0x07A0
103 #define MACSEC_TXSA_A_SALT		0x07A4
104 
105 #define MACSEC_TXSA_B_KA		0x07C0
106 #define MACSEC_TXSA_B_SSCI		0x07E0
107 #define MACSEC_TXSA_B_SALT		0x07E4
108 
109 #define MACSEC_UPFR0D2			0x0A08
110 #define MACSEC_UPFR0M1			0x0A10
111 #define MACSEC_OVP			BIT(12)
112 
113 #define	MACSEC_UPFR0M2			0x0A14
114 #define ETYPE_MASK			0xffff
115 
116 #define MACSEC_UPFR0R			0x0A18
117 #define MACSEC_UPFR_EN			BIT(0)
118 
119 #define ADPTR_CNTRL			0x0F00
120 #define ADPTR_CNTRL_CONFIG_EN		BIT(14)
121 #define ADPTR_CNTRL_ADPTR_EN		BIT(12)
122 #define ADPTR_TX_TAG_CNTRL		0x0F0C
123 #define ADPTR_TX_TAG_CNTRL_ENA		BIT(31)
124 
125 #define TX_SC_FLT_BASE			0x800
126 #define TX_SC_FLT_SIZE			0x10
127 #define TX_FLT_BASE(flt_id)		(TX_SC_FLT_BASE + \
128 	TX_SC_FLT_SIZE * (flt_id))
129 
130 #define TX_SC_FLT_OFF_MAC_DA_SA		0x04
131 #define TX_SC_FLT_OFF_MAC_SA		0x08
132 #define TX_SC_FLT_OFF_MAC_CFG		0x0C
133 #define TX_SC_FLT_BY_SA			BIT(14)
134 #define TX_SC_FLT_EN			BIT(8)
135 
136 #define TX_SC_FLT_MAC_DA_SA(base)	((base) + TX_SC_FLT_OFF_MAC_DA_SA)
137 #define TX_SC_FLT_MAC_SA(base)		((base) + TX_SC_FLT_OFF_MAC_SA)
138 #define TX_SC_FLT_MAC_CFG(base)		((base) + TX_SC_FLT_OFF_MAC_CFG)
139 
140 #define ADAPTER_EN	BIT(6)
141 #define MACSEC_EN	BIT(5)
142 
143 #define MACSEC_INOV1HS			0x0140
144 #define MACSEC_INOV2HS			0x0144
145 #define MACSEC_INOD1HS			0x0148
146 #define MACSEC_INOD2HS			0x014C
147 #define MACSEC_RXSCIPUS			0x0150
148 #define MACSEC_RXSCIPDS			0x0154
149 #define MACSEC_RXSCIPLS			0x0158
150 #define MACSEC_RXAN0INUSS		0x0160
151 #define MACSEC_RXAN0IPUSS		0x0170
152 #define MACSEC_RXSA_A_IPOS		0x0194
153 #define MACSEC_RXSA_A_IPIS		0x01B0
154 #define MACSEC_RXSA_A_IPNVS		0x01B4
155 #define MACSEC_RXSA_B_IPOS		0x01D4
156 #define MACSEC_RXSA_B_IPIS		0x01F0
157 #define MACSEC_RXSA_B_IPNVS		0x01F4
158 #define MACSEC_OPUS			0x021C
159 #define MACSEC_OPTLS			0x022C
160 #define MACSEC_OOP1HS			0x0240
161 #define MACSEC_OOP2HS			0x0244
162 #define MACSEC_OOE1HS			0x0248
163 #define MACSEC_OOE2HS			0x024C
164 #define MACSEC_TXSA_A_OPPS		0x028C
165 #define MACSEC_TXSA_A_OPES		0x0290
166 #define MACSEC_TXSA_B_OPPS		0x02CC
167 #define MACSEC_TXSA_B_OPES		0x02D0
168 #define MACSEC_INPWTS			0x0630
169 #define MACSEC_INPBTS			0x0638
170 #define MACSEC_IPSNFS			0x063C
171 
172 #define TJA11XX_TLV_TX_NEEDED_HEADROOM	(32)
173 #define TJA11XX_TLV_NEEDED_TAILROOM	(0)
174 
175 #define ETH_P_TJA11XX_TLV		(0x4e58)
176 
177 enum nxp_c45_sa_type {
178 	TX_SA,
179 	RX_SA,
180 };
181 
182 struct nxp_c45_sa {
183 	void *sa;
184 	const struct nxp_c45_sa_regs *regs;
185 	enum nxp_c45_sa_type type;
186 	bool is_key_a;
187 	u8 an;
188 	struct list_head list;
189 };
190 
191 struct nxp_c45_secy {
192 	struct macsec_secy *secy;
193 	struct macsec_rx_sc *rx_sc;
194 	struct list_head sa_list;
195 	int secy_id;
196 	bool rx_sc0_impl;
197 	struct list_head list;
198 };
199 
200 struct nxp_c45_macsec {
201 	struct list_head secy_list;
202 	DECLARE_BITMAP(secy_bitmap, TX_SC_MAX);
203 	DECLARE_BITMAP(tx_sc_bitmap, TX_SC_MAX);
204 };
205 
206 struct nxp_c45_sa_regs {
207 	u16 cs;
208 	u16 npn;
209 	u16 xnpn;
210 	u16 lnpn;
211 	u16 lxnpn;
212 	u16 ka;
213 	u16 ssci;
214 	u16 salt;
215 	u16 ipis;
216 	u16 ipnvs;
217 	u16 ipos;
218 	u16 opps;
219 	u16 opes;
220 };
221 
222 static const struct nxp_c45_sa_regs rx_sa_a_regs = {
223 	.cs	= MACSEC_RXSA_A_CS,
224 	.npn	= MACSEC_RXSA_A_NPN,
225 	.xnpn	= MACSEC_RXSA_A_XNPN,
226 	.lnpn	= MACSEC_RXSA_A_LNPN,
227 	.lxnpn	= MACSEC_RXSA_A_LXNPN,
228 	.ka	= MACSEC_RXSA_A_KA,
229 	.ssci	= MACSEC_RXSA_A_SSCI,
230 	.salt	= MACSEC_RXSA_A_SALT,
231 	.ipis	= MACSEC_RXSA_A_IPIS,
232 	.ipnvs	= MACSEC_RXSA_A_IPNVS,
233 	.ipos	= MACSEC_RXSA_A_IPOS,
234 };
235 
236 static const struct nxp_c45_sa_regs rx_sa_b_regs = {
237 	.cs	= MACSEC_RXSA_B_CS,
238 	.npn	= MACSEC_RXSA_B_NPN,
239 	.xnpn	= MACSEC_RXSA_B_XNPN,
240 	.lnpn	= MACSEC_RXSA_B_LNPN,
241 	.lxnpn	= MACSEC_RXSA_B_LXNPN,
242 	.ka	= MACSEC_RXSA_B_KA,
243 	.ssci	= MACSEC_RXSA_B_SSCI,
244 	.salt	= MACSEC_RXSA_B_SALT,
245 	.ipis	= MACSEC_RXSA_B_IPIS,
246 	.ipnvs	= MACSEC_RXSA_B_IPNVS,
247 	.ipos	= MACSEC_RXSA_B_IPOS,
248 };
249 
250 static const struct nxp_c45_sa_regs tx_sa_a_regs = {
251 	.cs	= MACSEC_TXSA_A_CS,
252 	.npn	= MACSEC_TXSA_A_NPN,
253 	.xnpn	= MACSEC_TXSA_A_XNPN,
254 	.ka	= MACSEC_TXSA_A_KA,
255 	.ssci	= MACSEC_TXSA_A_SSCI,
256 	.salt	= MACSEC_TXSA_A_SALT,
257 	.opps	= MACSEC_TXSA_A_OPPS,
258 	.opes	= MACSEC_TXSA_A_OPES,
259 };
260 
261 static const struct nxp_c45_sa_regs tx_sa_b_regs = {
262 	.cs	= MACSEC_TXSA_B_CS,
263 	.npn	= MACSEC_TXSA_B_NPN,
264 	.xnpn	= MACSEC_TXSA_B_XNPN,
265 	.ka	= MACSEC_TXSA_B_KA,
266 	.ssci	= MACSEC_TXSA_B_SSCI,
267 	.salt	= MACSEC_TXSA_B_SALT,
268 	.opps	= MACSEC_TXSA_B_OPPS,
269 	.opes	= MACSEC_TXSA_B_OPES,
270 };
271 
272 static const
273 struct nxp_c45_sa_regs *nxp_c45_sa_regs_get(enum nxp_c45_sa_type sa_type,
274 					    bool key_a)
275 {
276 	if (sa_type == RX_SA)
277 		if (key_a)
278 			return &rx_sa_a_regs;
279 		else
280 			return &rx_sa_b_regs;
281 	else if (sa_type == TX_SA)
282 		if (key_a)
283 			return &tx_sa_a_regs;
284 		else
285 			return &tx_sa_b_regs;
286 	else
287 		return NULL;
288 }
289 
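/* MACsec registers are 32 bits wide, but the MDIO bus moves 16 bits at a
 * time, so each 32-bit register at offset addr (a multiple of 4) is accessed
 * as two consecutive 16-bit MMD registers starting at
 * VEND1_MACSEC_BASE + addr / 2, lower half first.
 */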
290 static int nxp_c45_macsec_write(struct phy_device *phydev, u16 addr, u32 value)
291 {
292 	u32 lvalue = value;
293 	u16 laddr;
294 	int ret;
295 
296 	WARN_ON_ONCE(addr % 4);
297 
298 	phydev_dbg(phydev, "write addr 0x%x value 0x%x\n", addr, value);
299 
300 	laddr = VEND1_MACSEC_BASE + addr / 2;
301 	ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, laddr, lvalue);
302 	if (ret)
303 		return ret;
304 
305 	laddr += 1;
306 	lvalue >>= 16;
307 	ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, laddr, lvalue);
308 
309 	return ret;
310 }
311 
312 static int nxp_c45_macsec_read(struct phy_device *phydev, u16 addr, u32 *value)
313 {
314 	u32 lvalue;
315 	u16 laddr;
316 	int ret;
317 
318 	WARN_ON_ONCE(addr % 4);
319 
320 	laddr = VEND1_MACSEC_BASE + addr / 2;
321 	ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, laddr);
322 	if (ret < 0)
323 		return ret;
324 
325 	laddr += 1;
326 	lvalue = (u32)ret & 0xffff;
327 	ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, laddr);
328 	if (ret < 0)
329 		return ret;
330 
331 	lvalue |= (u32)ret << 16;
332 	*value = lvalue;
333 
334 	phydev_dbg(phydev, "read addr 0x%x value 0x%x\n", addr, *value);
335 
336 	return 0;
337 }
338 
339 static void nxp_c45_macsec_read32_64(struct phy_device *phydev, u16 addr,
340 				     u64 *value)
341 {
342 	u32 lvalue;
343 
344 	nxp_c45_macsec_read(phydev, addr, &lvalue);
345 	*value = lvalue;
346 }
347 
348 static void nxp_c45_macsec_read64(struct phy_device *phydev, u16 addr,
349 				  u64 *value)
350 {
351 	u32 lvalue;
352 
353 	nxp_c45_macsec_read(phydev, addr, &lvalue);
354 	*value = (u64)lvalue << 32;
355 	nxp_c45_macsec_read(phydev, addr + 4, &lvalue);
356 	*value |= lvalue;
357 }
358 
359 static void nxp_c45_secy_irq_en(struct phy_device *phydev,
360 				struct nxp_c45_secy *phy_secy, bool en)
361 {
362 	u32 reg;
363 
364 	nxp_c45_macsec_read(phydev, MACSEC_EVER, &reg);
365 	if (en)
366 		reg |= TX_SC_BIT(phy_secy->secy_id);
367 	else
368 		reg &= ~TX_SC_BIT(phy_secy->secy_id);
369 	nxp_c45_macsec_write(phydev, MACSEC_EVER, reg);
370 }
371 
372 static struct nxp_c45_secy *nxp_c45_find_secy(struct list_head *secy_list,
373 					      sci_t sci)
374 {
375 	struct nxp_c45_secy *pos, *tmp;
376 
377 	list_for_each_entry_safe(pos, tmp, secy_list, list)
378 		if (pos->secy->sci == sci)
379 			return pos;
380 
381 	return ERR_PTR(-EINVAL);
382 }
383 
384 static struct
385 nxp_c45_secy *nxp_c45_find_secy_by_id(struct list_head *secy_list,
386 				      int id)
387 {
388 	struct nxp_c45_secy *pos, *tmp;
389 
390 	list_for_each_entry_safe(pos, tmp, secy_list, list)
391 		if (pos->secy_id == id)
392 			return pos;
393 
394 	return ERR_PTR(-EINVAL);
395 }
396 
397 static void nxp_c45_secy_free(struct nxp_c45_secy *phy_secy)
398 {
399 	list_del(&phy_secy->list);
400 	kfree(phy_secy);
401 }
402 
403 static struct nxp_c45_sa *nxp_c45_find_sa(struct list_head *sa_list,
404 					  enum nxp_c45_sa_type sa_type, u8 an)
405 {
406 	struct nxp_c45_sa *pos, *tmp;
407 
408 	list_for_each_entry_safe(pos, tmp, sa_list, list)
409 		if (pos->an == an && pos->type == sa_type)
410 			return pos;
411 
412 	return ERR_PTR(-EINVAL);
413 }
414 
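/* The register map exposes two key slots (A and B) per direction, so at most
 * two SAs of a given type can be installed at once. A newly allocated SA
 * takes the slot not used by the already installed one, defaulting to A.
 */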
415 static struct nxp_c45_sa *nxp_c45_sa_alloc(struct list_head *sa_list, void *sa,
416 					   enum nxp_c45_sa_type sa_type, u8 an)
417 {
418 	struct nxp_c45_sa *first = NULL, *pos, *tmp;
419 	int occurrences = 0;
420 
421 	list_for_each_entry_safe(pos, tmp, sa_list, list) {
422 		if (pos->type != sa_type)
423 			continue;
424 
425 		if (pos->an == an)
426 			return ERR_PTR(-EINVAL);
427 
428 		first = pos;
429 		occurrences++;
430 		if (occurrences >= 2)
431 			return ERR_PTR(-ENOSPC);
432 	}
433 
434 	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
435 	if (!tmp)
436 		return ERR_PTR(-ENOMEM);
437 
438 	if (first)
439 		tmp->is_key_a = !first->is_key_a;
440 	else
441 		tmp->is_key_a = true;
442 
443 	tmp->sa = sa;
444 	tmp->type = sa_type;
445 	tmp->an = an;
446 	tmp->regs = nxp_c45_sa_regs_get(tmp->type, tmp->is_key_a);
447 	list_add_tail(&tmp->list, sa_list);
448 
449 	return tmp;
450 }
451 
452 static void nxp_c45_sa_free(struct nxp_c45_sa *sa)
453 {
454 	list_del(&sa->list);
455 	kfree(sa);
456 }
457 
458 static void nxp_c45_sa_list_free(struct list_head *sa_list)
459 {
460 	struct nxp_c45_sa *pos, *tmp;
461 
462 	list_for_each_entry_safe(pos, tmp, sa_list, list)
463 		nxp_c45_sa_free(pos);
464 }
465 
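/* Program the next PN of an SA. For RX SAs also program the lowest
 * acceptable PN, derived from the replay window and clamped to 1 when the
 * window is at least as large as the next PN.
 */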
466 static void nxp_c45_sa_set_pn(struct phy_device *phydev,
467 			      struct nxp_c45_sa *sa, u64 pn,
468 			      u32 replay_window)
469 {
470 	const struct nxp_c45_sa_regs *sa_regs = sa->regs;
471 	pn_t npn = {.full64 = pn};
472 	pn_t lnpn;
473 
474 	nxp_c45_macsec_write(phydev, sa_regs->npn, npn.lower);
475 	nxp_c45_macsec_write(phydev, sa_regs->xnpn, npn.upper);
476 	if (sa->type != RX_SA)
477 		return;
478 
479 	if (pn > replay_window)
480 		lnpn.full64 = pn - replay_window;
481 	else
482 		lnpn.full64 = 1;
483 
484 	nxp_c45_macsec_write(phydev, sa_regs->lnpn, lnpn.lower);
485 	nxp_c45_macsec_write(phydev, sa_regs->lxnpn, lnpn.upper);
486 }
487 
488 static void nxp_c45_sa_set_key(struct macsec_context *ctx,
489 			       const struct nxp_c45_sa_regs *sa_regs,
490 			       u8 *salt, ssci_t ssci)
491 {
492 	struct phy_device *phydev = ctx->phydev;
493 	u32 key_size = ctx->secy->key_len / 4;
494 	u32 salt_size = MACSEC_SALT_LEN / 4;
495 	u32 *key_u32 = (u32 *)ctx->sa.key;
496 	u32 *salt_u32 = (u32 *)salt;
497 	u32 reg, value;
498 	int i;
499 
500 	for (i = 0; i < key_size; i++) {
501 		reg = sa_regs->ka + i * 4;
502 		value = (__force u32)cpu_to_be32(key_u32[i]);
503 		nxp_c45_macsec_write(phydev, reg, value);
504 	}
505 
506 	if (ctx->secy->xpn) {
507 		for (i = 0; i < salt_size; i++) {
508 			reg = sa_regs->salt + (2 - i) * 4;
509 			value = (__force u32)cpu_to_be32(salt_u32[i]);
510 			nxp_c45_macsec_write(phydev, reg, value);
511 		}
512 
513 		value = (__force u32)cpu_to_be32((__force u32)ssci);
514 		nxp_c45_macsec_write(phydev, sa_regs->ssci, value);
515 	}
516 
517 	nxp_c45_macsec_write(phydev, sa_regs->cs, MACSEC_SA_CS_A);
518 }
519 
520 static void nxp_c45_rx_sa_clear_stats(struct phy_device *phydev,
521 				      struct nxp_c45_sa *sa)
522 {
523 	nxp_c45_macsec_write(phydev, sa->regs->ipis, 0);
524 	nxp_c45_macsec_write(phydev, sa->regs->ipnvs, 0);
525 	nxp_c45_macsec_write(phydev, sa->regs->ipos, 0);
526 
527 	nxp_c45_macsec_write(phydev, MACSEC_RXAN0INUSS + sa->an * 4, 0);
528 	nxp_c45_macsec_write(phydev, MACSEC_RXAN0IPUSS + sa->an * 4, 0);
529 }
530 
531 static void nxp_c45_rx_sa_read_stats(struct phy_device *phydev,
532 				     struct nxp_c45_sa *sa,
533 				     struct macsec_rx_sa_stats *stats)
534 {
535 	nxp_c45_macsec_read(phydev, sa->regs->ipis, &stats->InPktsInvalid);
536 	nxp_c45_macsec_read(phydev, sa->regs->ipnvs, &stats->InPktsNotValid);
537 	nxp_c45_macsec_read(phydev, sa->regs->ipos, &stats->InPktsOK);
538 }
539 
540 static void nxp_c45_tx_sa_clear_stats(struct phy_device *phydev,
541 				      struct nxp_c45_sa *sa)
542 {
543 	nxp_c45_macsec_write(phydev, sa->regs->opps, 0);
544 	nxp_c45_macsec_write(phydev, sa->regs->opes, 0);
545 }
546 
547 static void nxp_c45_tx_sa_read_stats(struct phy_device *phydev,
548 				     struct nxp_c45_sa *sa,
549 				     struct macsec_tx_sa_stats *stats)
550 {
551 	nxp_c45_macsec_read(phydev, sa->regs->opps, &stats->OutPktsProtected);
552 	nxp_c45_macsec_read(phydev, sa->regs->opes, &stats->OutPktsEncrypted);
553 }
554 
555 static void nxp_c45_rx_sa_update(struct phy_device *phydev,
556 				 struct nxp_c45_sa *sa, bool en)
557 {
558 	const struct nxp_c45_sa_regs *sa_regs = sa->regs;
559 	u32 cfg;
560 
561 	cfg = sa->an << MACSEC_RXSA_CS_AN_OFF;
562 	cfg |= en ? MACSEC_RXSA_CS_EN : 0;
563 	nxp_c45_macsec_write(phydev, sa_regs->cs, cfg);
564 }
565 
566 static void nxp_c45_tx_sa_update(struct phy_device *phydev,
567 				 struct nxp_c45_sa *sa, bool en)
568 {
569 	u32 cfg = 0;
570 
571 	nxp_c45_macsec_read(phydev, MACSEC_TXSC_CFG, &cfg);
572 
573 	cfg &= ~MACSEC_TXSC_CFG_AN_MASK;
574 	cfg |= sa->an << MACSEC_TXSC_CFG_AN_OFF;
575 
576 	if (sa->is_key_a)
577 		cfg &= ~MACSEC_TXSC_CFG_ASA;
578 	else
579 		cfg |= MACSEC_TXSC_CFG_ASA;
580 
581 	if (en)
582 		cfg |= MACSEC_TXSC_CFG_SCE;
583 	else
584 		cfg &= ~MACSEC_TXSC_CFG_SCE;
585 
586 	nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg);
587 }
588 
589 static void nxp_c45_set_sci(struct phy_device *phydev, u16 sci_base_addr,
590 			    sci_t sci)
591 {
592 	u64 lsci = sci_to_cpu(sci);
593 
594 	nxp_c45_macsec_write(phydev, sci_base_addr, lsci >> 32);
595 	nxp_c45_macsec_write(phydev, sci_base_addr + 4, lsci);
596 }
597 
598 static bool nxp_c45_port_is_1(sci_t sci)
599 {
600 	u16 port = sci_to_cpu(sci);
601 
602 	return port == 1;
603 }
604 
605 static void nxp_c45_select_secy(struct phy_device *phydev, u8 id)
606 {
607 	nxp_c45_macsec_write(phydev, MACSEC_RXSCA, id);
608 	nxp_c45_macsec_write(phydev, MACSEC_RXSCKA, id);
609 	nxp_c45_macsec_write(phydev, MACSEC_TXSCA, id);
610 	nxp_c45_macsec_write(phydev, MACSEC_TXSCKA, id);
611 }
612 
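/* Check whether this SecY can be supported: with end_station set the SCI
 * port number must be 1 (for the TX SC and any RX SC), scb is not supported,
 * and the implicit RX SC 0 (point-to-point) mode is accepted only on SecY
 * slot 0 while no other SecY exists.
 */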
613 static bool nxp_c45_secy_valid(struct nxp_c45_secy *phy_secy,
614 			       bool can_rx_sc0_impl)
615 {
616 	bool end_station = phy_secy->secy->tx_sc.end_station;
617 	bool scb = phy_secy->secy->tx_sc.scb;
618 
619 	phy_secy->rx_sc0_impl = false;
620 
621 	if (end_station) {
622 		if (!nxp_c45_port_is_1(phy_secy->secy->sci))
623 			return false;
624 		if (!phy_secy->rx_sc)
625 			return true;
626 		return nxp_c45_port_is_1(phy_secy->rx_sc->sci);
627 	}
628 
629 	if (scb)
630 		return false;
631 
632 	if (!can_rx_sc0_impl)
633 		return false;
634 
635 	if (phy_secy->secy_id != 0)
636 		return false;
637 
638 	phy_secy->rx_sc0_impl = true;
639 
640 	return true;
641 }
642 
643 static bool nxp_c45_rx_sc0_impl(struct nxp_c45_secy *phy_secy)
644 {
645 	bool end_station = phy_secy->secy->tx_sc.end_station;
646 	bool send_sci = phy_secy->secy->tx_sc.send_sci;
647 	bool scb = phy_secy->secy->tx_sc.scb;
648 
649 	return !end_station && !send_sci && !scb;
650 }
651 
652 static bool nxp_c45_mac_addr_free(struct macsec_context *ctx)
653 {
654 	struct nxp_c45_phy *priv = ctx->phydev->priv;
655 	struct nxp_c45_secy *pos, *tmp;
656 
657 	list_for_each_entry_safe(pos, tmp, &priv->macsec->secy_list, list) {
658 		if (pos->secy == ctx->secy)
659 			continue;
660 
661 		if (memcmp(pos->secy->netdev->dev_addr,
662 			   ctx->secy->netdev->dev_addr, ETH_ALEN) == 0)
663 			return false;
664 	}
665 
666 	return true;
667 }
668 
669 static void nxp_c45_tx_sc_en_flt(struct phy_device *phydev, int secy_id,
670 				 bool en)
671 {
672 	u32 tx_flt_base = TX_FLT_BASE(secy_id);
673 	u32 reg = 0;
674 
675 	nxp_c45_macsec_read(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), &reg);
676 	if (en)
677 		reg |= TX_SC_FLT_EN;
678 	else
679 		reg &= ~TX_SC_FLT_EN;
680 	nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), reg);
681 }
682 
683 static void nxp_c45_tx_sc_set_flt(struct phy_device *phydev,
684 				  struct nxp_c45_secy *phy_secy)
685 {
686 	const u8 *dev_addr = phy_secy->secy->netdev->dev_addr;
687 	u32 tx_flt_base = TX_FLT_BASE(phy_secy->secy_id);
688 	u32 reg;
689 
690 	reg = dev_addr[0] << 8 | dev_addr[1];
691 	nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_DA_SA(tx_flt_base), reg);
692 	reg = dev_addr[5] | dev_addr[4] << 8 | dev_addr[3] << 16 |
693 		dev_addr[2] << 24;
694 
695 	nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_SA(tx_flt_base), reg);
696 	nxp_c45_macsec_read(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), &reg);
697 	reg &= TX_SC_FLT_EN;
698 	reg |= TX_SC_FLT_BY_SA | phy_secy->secy_id;
699 	nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), reg);
700 }
701 
702 static void nxp_c45_tx_sc_update(struct phy_device *phydev,
703 				 struct nxp_c45_secy *phy_secy)
704 {
705 	u32 cfg = 0;
706 
707 	nxp_c45_macsec_read(phydev, MACSEC_TXSC_CFG, &cfg);
708 
709 	phydev_dbg(phydev, "XPN %s\n", phy_secy->secy->xpn ? "on" : "off");
710 	if (phy_secy->secy->xpn)
711 		cfg |= MACSEC_TXSC_CFG_XPN;
712 	else
713 		cfg &= ~MACSEC_TXSC_CFG_XPN;
714 
715 	phydev_dbg(phydev, "key len %u\n", phy_secy->secy->key_len);
716 	if (phy_secy->secy->key_len == 32)
717 		cfg |= MACSEC_TXSC_CFG_AES_256;
718 	else
719 		cfg &= ~MACSEC_TXSC_CFG_AES_256;
720 
721 	phydev_dbg(phydev, "encryption %s\n",
722 		   phy_secy->secy->tx_sc.encrypt ? "on" : "off");
723 	if (phy_secy->secy->tx_sc.encrypt)
724 		cfg |= MACSEC_TXSC_CFG_ENCRYPT;
725 	else
726 		cfg &= ~MACSEC_TXSC_CFG_ENCRYPT;
727 
728 	phydev_dbg(phydev, "protect frames %s\n",
729 		   phy_secy->secy->protect_frames ? "on" : "off");
730 	if (phy_secy->secy->protect_frames)
731 		cfg |= MACSEC_TXSC_CFG_PROTECT;
732 	else
733 		cfg &= ~MACSEC_TXSC_CFG_PROTECT;
734 
735 	phydev_dbg(phydev, "send sci %s\n",
736 		   phy_secy->secy->tx_sc.send_sci ? "on" : "off");
737 	if (phy_secy->secy->tx_sc.send_sci)
738 		cfg |= MACSEC_TXSC_CFG_SEND_SCI;
739 	else
740 		cfg &= ~MACSEC_TXSC_CFG_SEND_SCI;
741 
742 	phydev_dbg(phydev, "end station %s\n",
743 		   phy_secy->secy->tx_sc.end_station ? "on" : "off");
744 	if (phy_secy->secy->tx_sc.end_station)
745 		cfg |= MACSEC_TXSC_CFG_END_STATION;
746 	else
747 		cfg &= ~MACSEC_TXSC_CFG_END_STATION;
748 
749 	phydev_dbg(phydev, "scb %s\n",
750 		   phy_secy->secy->tx_sc.scb ? "on" : "off");
751 	if (phy_secy->secy->tx_sc.scb)
752 		cfg |= MACSEC_TXSC_CFG_SCB;
753 	else
754 		cfg &= ~MACSEC_TXSC_CFG_SCB;
755 
756 	nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg);
757 }
758 
759 static void nxp_c45_tx_sc_clear_stats(struct phy_device *phydev,
760 				      struct nxp_c45_secy *phy_secy)
761 {
762 	struct nxp_c45_sa *pos, *tmp;
763 
764 	list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list)
765 		if (pos->type == TX_SA)
766 			nxp_c45_tx_sa_clear_stats(phydev, pos);
767 
768 	nxp_c45_macsec_write(phydev, MACSEC_OPUS, 0);
769 	nxp_c45_macsec_write(phydev, MACSEC_OPTLS, 0);
770 	nxp_c45_macsec_write(phydev, MACSEC_OOP1HS, 0);
771 	nxp_c45_macsec_write(phydev, MACSEC_OOP2HS, 0);
772 	nxp_c45_macsec_write(phydev, MACSEC_OOE1HS, 0);
773 	nxp_c45_macsec_write(phydev, MACSEC_OOE2HS, 0);
774 }
775 
776 static void nxp_c45_set_rx_sc0_impl(struct phy_device *phydev,
777 				    bool enable)
778 {
779 	u32 reg = 0;
780 
781 	nxp_c45_macsec_read(phydev, MACSEC_CFG, &reg);
782 	if (enable)
783 		reg |= MACSEC_CFG_S0I;
784 	else
785 		reg &= ~MACSEC_CFG_S0I;
786 	nxp_c45_macsec_write(phydev, MACSEC_CFG, reg);
787 }
788 
789 static bool nxp_c45_is_rx_sc0_impl(struct list_head *secy_list)
790 {
791 	struct nxp_c45_secy *pos, *tmp;
792 
793 	list_for_each_entry_safe(pos, tmp, secy_list, list)
794 		if (pos->rx_sc0_impl)
795 			return pos->rx_sc0_impl;
796 
797 	return false;
798 }
799 
800 static void nxp_c45_rx_sc_en(struct phy_device *phydev,
801 			     struct macsec_rx_sc *rx_sc, bool en)
802 {
803 	u32 reg = 0;
804 
805 	nxp_c45_macsec_read(phydev, MACSEC_RXSC_CFG, &reg);
806 	if (rx_sc->active && en)
807 		reg |= MACSEC_RXSC_CFG_SCI_EN;
808 	else
809 		reg &= ~MACSEC_RXSC_CFG_SCI_EN;
810 	nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, reg);
811 }
812 
813 static void nxp_c45_rx_sc_update(struct phy_device *phydev,
814 				 struct nxp_c45_secy *phy_secy)
815 {
816 	struct macsec_rx_sc *rx_sc = phy_secy->rx_sc;
817 	struct nxp_c45_phy *priv = phydev->priv;
818 	u32 cfg = 0;
819 
820 	nxp_c45_macsec_read(phydev, MACSEC_RXSC_CFG, &cfg);
821 	cfg &= ~MACSEC_RXSC_CFG_VF_MASK;
822 	cfg |= phy_secy->secy->validate_frames << MACSEC_RXSC_CFG_VF_OFF;
823 
824 	phydev_dbg(phydev, "validate frames %u\n",
825 		   phy_secy->secy->validate_frames);
826 	phydev_dbg(phydev, "replay_protect %s window %u\n",
827 		   phy_secy->secy->replay_protect ? "on" : "off",
828 		   phy_secy->secy->replay_window);
829 	if (phy_secy->secy->replay_protect) {
830 		cfg |= MACSEC_RXSC_CFG_RP;
831 		nxp_c45_macsec_write(phydev, MACSEC_RPW,
832 				     phy_secy->secy->replay_window);
833 	} else {
834 		cfg &= ~MACSEC_RXSC_CFG_RP;
835 	}
836 
837 	phydev_dbg(phydev, "rx_sc->active %s\n",
838 		   rx_sc->active ? "on" : "off");
839 	if (rx_sc->active &&
840 	    test_bit(phy_secy->secy_id, priv->macsec->secy_bitmap))
841 		cfg |= MACSEC_RXSC_CFG_SCI_EN;
842 	else
843 		cfg &= ~MACSEC_RXSC_CFG_SCI_EN;
844 
845 	phydev_dbg(phydev, "key len %u\n", phy_secy->secy->key_len);
846 	if (phy_secy->secy->key_len == 32)
847 		cfg |= MACSEC_RXSC_CFG_AES_256;
848 	else
849 		cfg &= ~MACSEC_RXSC_CFG_AES_256;
850 
851 	phydev_dbg(phydev, "XPN %s\n", phy_secy->secy->xpn ? "on" : "off");
852 	if (phy_secy->secy->xpn)
853 		cfg |= MACSEC_RXSC_CFG_XPN;
854 	else
855 		cfg &= ~MACSEC_RXSC_CFG_XPN;
856 
857 	nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, cfg);
858 }
859 
860 static void nxp_c45_rx_sc_clear_stats(struct phy_device *phydev,
861 				      struct nxp_c45_secy *phy_secy)
862 {
863 	struct nxp_c45_sa *pos, *tmp;
864 	int i;
865 
866 	list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list)
867 		if (pos->type == RX_SA)
868 			nxp_c45_rx_sa_clear_stats(phydev, pos);
869 
870 	nxp_c45_macsec_write(phydev, MACSEC_INOD1HS, 0);
871 	nxp_c45_macsec_write(phydev, MACSEC_INOD2HS, 0);
872 
873 	nxp_c45_macsec_write(phydev, MACSEC_INOV1HS, 0);
874 	nxp_c45_macsec_write(phydev, MACSEC_INOV2HS, 0);
875 
876 	nxp_c45_macsec_write(phydev, MACSEC_RXSCIPDS, 0);
877 	nxp_c45_macsec_write(phydev, MACSEC_RXSCIPLS, 0);
878 	nxp_c45_macsec_write(phydev, MACSEC_RXSCIPUS, 0);
879 
880 	for (i = 0; i < MACSEC_NUM_AN; i++) {
881 		nxp_c45_macsec_write(phydev, MACSEC_RXAN0INUSS + i * 4, 0);
882 		nxp_c45_macsec_write(phydev, MACSEC_RXAN0IPUSS + i * 4, 0);
883 	}
884 }
885 
886 static void nxp_c45_rx_sc_del(struct phy_device *phydev,
887 			      struct nxp_c45_secy *phy_secy)
888 {
889 	struct nxp_c45_sa *pos, *tmp;
890 
891 	nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, 0);
892 	nxp_c45_macsec_write(phydev, MACSEC_RPW, 0);
893 	nxp_c45_set_sci(phydev, MACSEC_RXSC_SCI_1H, 0);
894 
895 	nxp_c45_rx_sc_clear_stats(phydev, phy_secy);
896 
897 	list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) {
898 		if (pos->type == RX_SA) {
899 			nxp_c45_rx_sa_update(phydev, pos, false);
900 			nxp_c45_sa_free(pos);
901 		}
902 	}
903 }
904 
905 static void nxp_c45_clear_global_stats(struct phy_device *phydev)
906 {
907 	nxp_c45_macsec_write(phydev, MACSEC_INPBTS, 0);
908 	nxp_c45_macsec_write(phydev, MACSEC_INPWTS, 0);
909 	nxp_c45_macsec_write(phydev, MACSEC_IPSNFS, 0);
910 }
911 
912 static void nxp_c45_macsec_en(struct phy_device *phydev, bool en)
913 {
914 	u32 reg;
915 
916 	nxp_c45_macsec_read(phydev, MACSEC_CFG, &reg);
917 	if (en)
918 		reg &= ~MACSEC_CFG_BYPASS;
919 	else
920 		reg |= MACSEC_CFG_BYPASS;
921 	nxp_c45_macsec_write(phydev, MACSEC_CFG, reg);
922 }
923 
924 static int nxp_c45_mdo_dev_open(struct macsec_context *ctx)
925 {
926 	struct phy_device *phydev = ctx->phydev;
927 	struct nxp_c45_phy *priv = phydev->priv;
928 	struct nxp_c45_secy *phy_secy;
929 	int any_bit_set;
930 
931 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
932 	if (IS_ERR(phy_secy))
933 		return PTR_ERR(phy_secy);
934 
935 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
936 
937 	nxp_c45_tx_sc_en_flt(phydev, phy_secy->secy_id, true);
938 	nxp_c45_set_rx_sc0_impl(phydev, phy_secy->rx_sc0_impl);
939 	if (phy_secy->rx_sc)
940 		nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, true);
941 
942 	any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
943 	if (any_bit_set == TX_SC_MAX)
944 		nxp_c45_macsec_en(phydev, true);
945 
946 	set_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
947 
948 	return 0;
949 }
950 
951 static int nxp_c45_mdo_dev_stop(struct macsec_context *ctx)
952 {
953 	struct phy_device *phydev = ctx->phydev;
954 	struct nxp_c45_phy *priv = phydev->priv;
955 	struct nxp_c45_secy *phy_secy;
956 	int any_bit_set;
957 
958 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
959 	if (IS_ERR(phy_secy))
960 		return PTR_ERR(phy_secy);
961 
962 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
963 
964 	nxp_c45_tx_sc_en_flt(phydev, phy_secy->secy_id, false);
965 	if (phy_secy->rx_sc)
966 		nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, false);
967 	nxp_c45_set_rx_sc0_impl(phydev, false);
968 
969 	clear_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
970 	any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
971 	if (any_bit_set == TX_SC_MAX)
972 		nxp_c45_macsec_en(phydev, false);
973 
974 	return 0;
975 }
976 
977 static int nxp_c45_mdo_add_secy(struct macsec_context *ctx)
978 {
979 	struct phy_device *phydev = ctx->phydev;
980 	struct nxp_c45_phy *priv = phydev->priv;
981 	struct nxp_c45_secy *phy_secy;
982 	bool can_rx_sc0_impl;
983 	int idx;
984 
985 	phydev_dbg(phydev, "add SecY SCI %016llx\n",
986 		   sci_to_cpu(ctx->secy->sci));
987 
988 	if (!nxp_c45_mac_addr_free(ctx))
989 		return -EBUSY;
990 
991 	if (nxp_c45_is_rx_sc0_impl(&priv->macsec->secy_list))
992 		return -EBUSY;
993 
994 	idx = find_first_zero_bit(priv->macsec->tx_sc_bitmap, TX_SC_MAX);
995 	if (idx == TX_SC_MAX)
996 		return -ENOSPC;
997 
998 	phy_secy = kzalloc(sizeof(*phy_secy), GFP_KERNEL);
999 	if (!phy_secy)
1000 		return -ENOMEM;
1001 
1002 	INIT_LIST_HEAD(&phy_secy->sa_list);
1003 	phy_secy->secy = ctx->secy;
1004 	phy_secy->secy_id = idx;
1005 
1006 	/* If the point-to-point mode is to be enabled, no SecY should have
1007 	 * been added yet.
1008 	 */
1009 	can_rx_sc0_impl = list_count_nodes(&priv->macsec->secy_list) == 0;
1010 	if (!nxp_c45_secy_valid(phy_secy, can_rx_sc0_impl)) {
1011 		kfree(phy_secy);
1012 		return -EINVAL;
1013 	}
1014 
1015 	phy_secy->rx_sc0_impl = nxp_c45_rx_sc0_impl(phy_secy);
1016 
1017 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1018 	nxp_c45_set_sci(phydev, MACSEC_TXSC_SCI_1H, ctx->secy->sci);
1019 	nxp_c45_tx_sc_set_flt(phydev, phy_secy);
1020 	nxp_c45_tx_sc_update(phydev, phy_secy);
1021 	if (phy_interrupt_is_valid(phydev))
1022 		nxp_c45_secy_irq_en(phydev, phy_secy, true);
1023 
1024 	set_bit(idx, priv->macsec->tx_sc_bitmap);
1025 	list_add_tail(&phy_secy->list, &priv->macsec->secy_list);
1026 
1027 	return 0;
1028 }
1029 
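/* Build a descriptor for the encoding SA so the TX SC can be reprogrammed
 * even when that SA has not been installed yet; in that case key slot A and
 * the requested AN are assumed.
 */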
1030 static void nxp_c45_tx_sa_next(struct nxp_c45_secy *phy_secy,
1031 			       struct nxp_c45_sa *next_sa, u8 encoding_sa)
1032 {
1033 	struct nxp_c45_sa *sa;
1034 
1035 	sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, encoding_sa);
1036 	if (!IS_ERR(sa)) {
1037 		memcpy(next_sa, sa, sizeof(*sa));
1038 	} else {
1039 		next_sa->is_key_a = true;
1040 		next_sa->an = encoding_sa;
1041 	}
1042 }
1043 
1044 static int nxp_c45_mdo_upd_secy(struct macsec_context *ctx)
1045 {
1046 	u8 encoding_sa = ctx->secy->tx_sc.encoding_sa;
1047 	struct phy_device *phydev = ctx->phydev;
1048 	struct nxp_c45_phy *priv = phydev->priv;
1049 	struct nxp_c45_secy *phy_secy;
1050 	struct nxp_c45_sa next_sa;
1051 	bool can_rx_sc0_impl;
1052 
1053 	phydev_dbg(phydev, "update SecY SCI %016llx\n",
1054 		   sci_to_cpu(ctx->secy->sci));
1055 
1056 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1057 	if (IS_ERR(phy_secy))
1058 		return PTR_ERR(phy_secy);
1059 
1060 	if (!nxp_c45_mac_addr_free(ctx))
1061 		return -EBUSY;
1062 
1063 	/* If the point-to-point mode is to be enabled, only one SecY should
1064 	 * be added, namely the one being updated.
1065 	 */
1066 	can_rx_sc0_impl = list_count_nodes(&priv->macsec->secy_list) == 1;
1067 	if (!nxp_c45_secy_valid(phy_secy, can_rx_sc0_impl))
1068 		return -EINVAL;
1069 	phy_secy->rx_sc0_impl = nxp_c45_rx_sc0_impl(phy_secy);
1070 
1071 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1072 	nxp_c45_tx_sc_set_flt(phydev, phy_secy);
1073 	nxp_c45_tx_sc_update(phydev, phy_secy);
1074 	nxp_c45_tx_sa_next(phy_secy, &next_sa, encoding_sa);
1075 	nxp_c45_tx_sa_update(phydev, &next_sa, ctx->secy->operational);
1076 
1077 	nxp_c45_set_rx_sc0_impl(phydev, phy_secy->rx_sc0_impl);
1078 	if (phy_secy->rx_sc)
1079 		nxp_c45_rx_sc_update(phydev, phy_secy);
1080 
1081 	return 0;
1082 }
1083 
1084 static int nxp_c45_mdo_del_secy(struct macsec_context *ctx)
1085 {
1086 	u8 encoding_sa = ctx->secy->tx_sc.encoding_sa;
1087 	struct phy_device *phydev = ctx->phydev;
1088 	struct nxp_c45_phy *priv = phydev->priv;
1089 	struct nxp_c45_secy *phy_secy;
1090 	struct nxp_c45_sa next_sa;
1091 
1092 	phydev_dbg(phydev, "delete SecY SCI %016llx\n",
1093 		   sci_to_cpu(ctx->secy->sci));
1094 
1095 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1096 	if (IS_ERR(phy_secy))
1097 		return PTR_ERR(phy_secy);
1098 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1099 
1100 	nxp_c45_mdo_dev_stop(ctx);
1101 	nxp_c45_tx_sa_next(phy_secy, &next_sa, encoding_sa);
1102 	nxp_c45_tx_sa_update(phydev, &next_sa, false);
1103 	nxp_c45_tx_sc_clear_stats(phydev, phy_secy);
1104 	if (phy_secy->rx_sc)
1105 		nxp_c45_rx_sc_del(phydev, phy_secy);
1106 
1107 	nxp_c45_sa_list_free(&phy_secy->sa_list);
1108 	if (phy_interrupt_is_valid(phydev))
1109 		nxp_c45_secy_irq_en(phydev, phy_secy, false);
1110 
1111 	clear_bit(phy_secy->secy_id, priv->macsec->tx_sc_bitmap);
1112 	nxp_c45_secy_free(phy_secy);
1113 
1114 	if (list_empty(&priv->macsec->secy_list))
1115 		nxp_c45_clear_global_stats(phydev);
1116 
1117 	return 0;
1118 }
1119 
1120 static int nxp_c45_mdo_add_rxsc(struct macsec_context *ctx)
1121 {
1122 	struct phy_device *phydev = ctx->phydev;
1123 	struct nxp_c45_phy *priv = phydev->priv;
1124 	struct nxp_c45_secy *phy_secy;
1125 
1126 	phydev_dbg(phydev, "add RX SC SCI %016llx %s\n",
1127 		   sci_to_cpu(ctx->rx_sc->sci),
1128 		   ctx->rx_sc->active ? "enabled" : "disabled");
1129 
1130 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1131 	if (IS_ERR(phy_secy))
1132 		return PTR_ERR(phy_secy);
1133 
1134 	if (phy_secy->rx_sc)
1135 		return -ENOSPC;
1136 
1137 	if (phy_secy->secy->tx_sc.end_station &&
1138 	    !nxp_c45_port_is_1(ctx->rx_sc->sci))
1139 		return -EINVAL;
1140 
1141 	phy_secy->rx_sc = ctx->rx_sc;
1142 
1143 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1144 	nxp_c45_set_sci(phydev, MACSEC_RXSC_SCI_1H, ctx->rx_sc->sci);
1145 	nxp_c45_rx_sc_update(phydev, phy_secy);
1146 
1147 	return 0;
1148 }
1149 
1150 static int nxp_c45_mdo_upd_rxsc(struct macsec_context *ctx)
1151 {
1152 	struct phy_device *phydev = ctx->phydev;
1153 	struct nxp_c45_phy *priv = phydev->priv;
1154 	struct nxp_c45_secy *phy_secy;
1155 
1156 	phydev_dbg(phydev, "update RX SC SCI %016llx %s\n",
1157 		   sci_to_cpu(ctx->rx_sc->sci),
1158 		   ctx->rx_sc->active ? "enabled" : "disabled");
1159 
1160 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1161 	if (IS_ERR(phy_secy))
1162 		return PTR_ERR(phy_secy);
1163 
1164 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1165 	nxp_c45_rx_sc_update(phydev, phy_secy);
1166 
1167 	return 0;
1168 }
1169 
1170 static int nxp_c45_mdo_del_rxsc(struct macsec_context *ctx)
1171 {
1172 	struct phy_device *phydev = ctx->phydev;
1173 	struct nxp_c45_phy *priv = phydev->priv;
1174 	struct nxp_c45_secy *phy_secy;
1175 
1176 	phydev_dbg(phydev, "delete RX SC SCI %016llx %s\n",
1177 		   sci_to_cpu(ctx->rx_sc->sci),
1178 		   ctx->rx_sc->active ? "enabled" : "disabled");
1179 
1180 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1181 	if (IS_ERR(phy_secy))
1182 		return PTR_ERR(phy_secy);
1183 
1184 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1185 	nxp_c45_rx_sc_del(phydev, phy_secy);
1186 	phy_secy->rx_sc = NULL;
1187 
1188 	return 0;
1189 }
1190 
1191 static int nxp_c45_mdo_add_rxsa(struct macsec_context *ctx)
1192 {
1193 	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
1194 	struct phy_device *phydev = ctx->phydev;
1195 	struct nxp_c45_phy *priv = phydev->priv;
1196 	struct nxp_c45_secy *phy_secy;
1197 	u8 an = ctx->sa.assoc_num;
1198 	struct nxp_c45_sa *sa;
1199 
1200 	phydev_dbg(phydev, "add RX SA %u %s to RX SC SCI %016llx\n",
1201 		   an, rx_sa->active ? "enabled" : "disabled",
1202 		   sci_to_cpu(rx_sa->sc->sci));
1203 
1204 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1205 	if (IS_ERR(phy_secy))
1206 		return PTR_ERR(phy_secy);
1207 
1208 	sa = nxp_c45_sa_alloc(&phy_secy->sa_list, rx_sa, RX_SA, an);
1209 	if (IS_ERR(sa))
1210 		return PTR_ERR(sa);
1211 
1212 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1213 	nxp_c45_sa_set_pn(phydev, sa, rx_sa->next_pn,
1214 			  ctx->secy->replay_window);
1215 	nxp_c45_sa_set_key(ctx, sa->regs, rx_sa->key.salt.bytes, rx_sa->ssci);
1216 	nxp_c45_rx_sa_update(phydev, sa, rx_sa->active);
1217 
1218 	return 0;
1219 }
1220 
1221 static int nxp_c45_mdo_upd_rxsa(struct macsec_context *ctx)
1222 {
1223 	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
1224 	struct phy_device *phydev = ctx->phydev;
1225 	struct nxp_c45_phy *priv = phydev->priv;
1226 	struct nxp_c45_secy *phy_secy;
1227 	u8 an = ctx->sa.assoc_num;
1228 	struct nxp_c45_sa *sa;
1229 
1230 	phydev_dbg(phydev, "update RX SA %u %s to RX SC SCI %016llx\n",
1231 		   an, rx_sa->active ? "enabled" : "disabled",
1232 		   sci_to_cpu(rx_sa->sc->sci));
1233 
1234 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1235 	if (IS_ERR(phy_secy))
1236 		return PTR_ERR(phy_secy);
1237 
1238 	sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an);
1239 	if (IS_ERR(sa))
1240 		return PTR_ERR(sa);
1241 
1242 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1243 	if (ctx->sa.update_pn)
1244 		nxp_c45_sa_set_pn(phydev, sa, rx_sa->next_pn,
1245 				  ctx->secy->replay_window);
1246 	nxp_c45_rx_sa_update(phydev, sa, rx_sa->active);
1247 
1248 	return 0;
1249 }
1250 
1251 static int nxp_c45_mdo_del_rxsa(struct macsec_context *ctx)
1252 {
1253 	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
1254 	struct phy_device *phydev = ctx->phydev;
1255 	struct nxp_c45_phy *priv = phydev->priv;
1256 	struct nxp_c45_secy *phy_secy;
1257 	u8 an = ctx->sa.assoc_num;
1258 	struct nxp_c45_sa *sa;
1259 
1260 	phydev_dbg(phydev, "delete RX SA %u %s to RX SC SCI %016llx\n",
1261 		   an, rx_sa->active ? "enabled" : "disabled",
1262 		   sci_to_cpu(rx_sa->sc->sci));
1263 
1264 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1265 	if (IS_ERR(phy_secy))
1266 		return PTR_ERR(phy_secy);
1267 
1268 	sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an);
1269 	if (IS_ERR(sa))
1270 		return PTR_ERR(sa);
1271 
1272 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1273 	nxp_c45_rx_sa_update(phydev, sa, false);
1274 	nxp_c45_rx_sa_clear_stats(phydev, sa);
1275 
1276 	nxp_c45_sa_free(sa);
1277 
1278 	return 0;
1279 }
1280 
1281 static int nxp_c45_mdo_add_txsa(struct macsec_context *ctx)
1282 {
1283 	struct macsec_tx_sa *tx_sa = ctx->sa.tx_sa;
1284 	struct phy_device *phydev = ctx->phydev;
1285 	struct nxp_c45_phy *priv = phydev->priv;
1286 	struct nxp_c45_secy *phy_secy;
1287 	u8 an = ctx->sa.assoc_num;
1288 	struct nxp_c45_sa *sa;
1289 
1290 	phydev_dbg(phydev, "add TX SA %u %s to TX SC %016llx\n",
1291 		   an, ctx->sa.tx_sa->active ? "enabled" : "disabled",
1292 		   sci_to_cpu(ctx->secy->sci));
1293 
1294 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1295 	if (IS_ERR(phy_secy))
1296 		return PTR_ERR(phy_secy);
1297 
1298 	sa = nxp_c45_sa_alloc(&phy_secy->sa_list, tx_sa, TX_SA, an);
1299 	if (IS_ERR(sa))
1300 		return PTR_ERR(sa);
1301 
1302 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1303 	nxp_c45_sa_set_pn(phydev, sa, tx_sa->next_pn, 0);
1304 	nxp_c45_sa_set_key(ctx, sa->regs, tx_sa->key.salt.bytes, tx_sa->ssci);
1305 	if (ctx->secy->tx_sc.encoding_sa == sa->an)
1306 		nxp_c45_tx_sa_update(phydev, sa, tx_sa->active);
1307 
1308 	return 0;
1309 }
1310 
1311 static int nxp_c45_mdo_upd_txsa(struct macsec_context *ctx)
1312 {
1313 	struct macsec_tx_sa *tx_sa = ctx->sa.tx_sa;
1314 	struct phy_device *phydev = ctx->phydev;
1315 	struct nxp_c45_phy *priv = phydev->priv;
1316 	struct nxp_c45_secy *phy_secy;
1317 	u8 an = ctx->sa.assoc_num;
1318 	struct nxp_c45_sa *sa;
1319 
1320 	phydev_dbg(phydev, "update TX SA %u %s to TX SC %016llx\n",
1321 		   an, ctx->sa.tx_sa->active ? "enabled" : "disabled",
1322 		   sci_to_cpu(ctx->secy->sci));
1323 
1324 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1325 	if (IS_ERR(phy_secy))
1326 		return PTR_ERR(phy_secy);
1327 
1328 	sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an);
1329 	if (IS_ERR(sa))
1330 		return PTR_ERR(sa);
1331 
1332 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1333 	if (ctx->sa.update_pn)
1334 		nxp_c45_sa_set_pn(phydev, sa, tx_sa->next_pn, 0);
1335 	if (ctx->secy->tx_sc.encoding_sa == sa->an)
1336 		nxp_c45_tx_sa_update(phydev, sa, tx_sa->active);
1337 
1338 	return 0;
1339 }
1340 
1341 static int nxp_c45_mdo_del_txsa(struct macsec_context *ctx)
1342 {
1343 	struct phy_device *phydev = ctx->phydev;
1344 	struct nxp_c45_phy *priv = phydev->priv;
1345 	struct nxp_c45_secy *phy_secy;
1346 	u8 an = ctx->sa.assoc_num;
1347 	struct nxp_c45_sa *sa;
1348 
1349 	phydev_dbg(phydev, "delete TX SA %u %s to TX SC %016llx\n",
1350 		   an, ctx->sa.tx_sa->active ? "enabled" : "disabled",
1351 		   sci_to_cpu(ctx->secy->sci));
1352 
1353 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1354 	if (IS_ERR(phy_secy))
1355 		return PTR_ERR(phy_secy);
1356 
1357 	sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an);
1358 	if (IS_ERR(sa))
1359 		return PTR_ERR(sa);
1360 
1361 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1362 	if (ctx->secy->tx_sc.encoding_sa == sa->an)
1363 		nxp_c45_tx_sa_update(phydev, sa, false);
1364 	nxp_c45_tx_sa_clear_stats(phydev, sa);
1365 
1366 	nxp_c45_sa_free(sa);
1367 
1368 	return 0;
1369 }
1370 
1371 static int nxp_c45_mdo_get_dev_stats(struct macsec_context *ctx)
1372 {
1373 	struct phy_device *phydev = ctx->phydev;
1374 	struct nxp_c45_phy *priv = phydev->priv;
1375 	struct macsec_dev_stats *dev_stats;
1376 	struct nxp_c45_secy *phy_secy;
1377 
1378 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1379 	if (IS_ERR(phy_secy))
1380 		return PTR_ERR(phy_secy);
1381 
1382 	dev_stats = ctx->stats.dev_stats;
1383 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1384 
1385 	nxp_c45_macsec_read32_64(phydev, MACSEC_OPUS,
1386 				 &dev_stats->OutPktsUntagged);
1387 	nxp_c45_macsec_read32_64(phydev, MACSEC_OPTLS,
1388 				 &dev_stats->OutPktsTooLong);
1389 	nxp_c45_macsec_read32_64(phydev, MACSEC_INPBTS,
1390 				 &dev_stats->InPktsBadTag);
1391 
1392 	if (phy_secy->secy->validate_frames == MACSEC_VALIDATE_STRICT)
1393 		nxp_c45_macsec_read32_64(phydev, MACSEC_INPWTS,
1394 					 &dev_stats->InPktsNoTag);
1395 	else
1396 		nxp_c45_macsec_read32_64(phydev, MACSEC_INPWTS,
1397 					 &dev_stats->InPktsUntagged);
1398 
1399 	if (phy_secy->secy->validate_frames == MACSEC_VALIDATE_STRICT)
1400 		nxp_c45_macsec_read32_64(phydev, MACSEC_IPSNFS,
1401 					 &dev_stats->InPktsNoSCI);
1402 	else
1403 		nxp_c45_macsec_read32_64(phydev, MACSEC_IPSNFS,
1404 					 &dev_stats->InPktsUnknownSCI);
1405 
1406 	/* Always 0. */
1407 	dev_stats->InPktsOverrun = 0;
1408 
1409 	return 0;
1410 }
1411 
1412 static int nxp_c45_mdo_get_tx_sc_stats(struct macsec_context *ctx)
1413 {
1414 	struct phy_device *phydev = ctx->phydev;
1415 	struct nxp_c45_phy *priv = phydev->priv;
1416 	struct macsec_tx_sa_stats tx_sa_stats;
1417 	struct macsec_tx_sc_stats *stats;
1418 	struct nxp_c45_secy *phy_secy;
1419 	struct nxp_c45_sa *pos, *tmp;
1420 
1421 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1422 	if (IS_ERR(phy_secy))
1423 		return PTR_ERR(phy_secy);
1424 
1425 	stats = ctx->stats.tx_sc_stats;
1426 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1427 
1428 	nxp_c45_macsec_read64(phydev, MACSEC_OOE1HS,
1429 			      &stats->OutOctetsEncrypted);
1430 	nxp_c45_macsec_read64(phydev, MACSEC_OOP1HS,
1431 			      &stats->OutOctetsProtected);
1432 	list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) {
1433 		if (pos->type != TX_SA)
1434 			continue;
1435 
1436 		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
1437 		nxp_c45_tx_sa_read_stats(phydev, pos, &tx_sa_stats);
1438 
1439 		stats->OutPktsEncrypted += tx_sa_stats.OutPktsEncrypted;
1440 		stats->OutPktsProtected += tx_sa_stats.OutPktsProtected;
1441 	}
1442 
1443 	return 0;
1444 }
1445 
1446 static int nxp_c45_mdo_get_tx_sa_stats(struct macsec_context *ctx)
1447 {
1448 	struct phy_device *phydev = ctx->phydev;
1449 	struct nxp_c45_phy *priv = phydev->priv;
1450 	struct macsec_tx_sa_stats *stats;
1451 	struct nxp_c45_secy *phy_secy;
1452 	u8 an = ctx->sa.assoc_num;
1453 	struct nxp_c45_sa *sa;
1454 
1455 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1456 	if (IS_ERR(phy_secy))
1457 		return PTR_ERR(phy_secy);
1458 
1459 	sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an);
1460 	if (IS_ERR(sa))
1461 		return PTR_ERR(sa);
1462 
1463 	stats = ctx->stats.tx_sa_stats;
1464 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1465 	nxp_c45_tx_sa_read_stats(phydev, sa, stats);
1466 
1467 	return 0;
1468 }
1469 
1470 static int nxp_c45_mdo_get_rx_sc_stats(struct macsec_context *ctx)
1471 {
1472 	struct phy_device *phydev = ctx->phydev;
1473 	struct nxp_c45_phy *priv = phydev->priv;
1474 	struct macsec_rx_sa_stats rx_sa_stats;
1475 	struct macsec_rx_sc_stats *stats;
1476 	struct nxp_c45_secy *phy_secy;
1477 	struct nxp_c45_sa *pos, *tmp;
1478 	u32 reg = 0;
1479 	int i;
1480 
1481 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1482 	if (IS_ERR(phy_secy))
1483 		return PTR_ERR(phy_secy);
1484 
1485 	if (phy_secy->rx_sc != ctx->rx_sc)
1486 		return -EINVAL;
1487 
1488 	stats = ctx->stats.rx_sc_stats;
1489 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1490 
1491 	list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) {
1492 		if (pos->type != RX_SA)
1493 			continue;
1494 
1495 		memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
1496 		nxp_c45_rx_sa_read_stats(phydev, pos, &rx_sa_stats);
1497 
1498 		stats->InPktsInvalid += rx_sa_stats.InPktsInvalid;
1499 		stats->InPktsNotValid += rx_sa_stats.InPktsNotValid;
1500 		stats->InPktsOK += rx_sa_stats.InPktsOK;
1501 	}
1502 
1503 	for (i = 0; i < MACSEC_NUM_AN; i++) {
1504 		nxp_c45_macsec_read(phydev, MACSEC_RXAN0INUSS + i * 4, &reg);
1505 		stats->InPktsNotUsingSA += reg;
1506 		nxp_c45_macsec_read(phydev, MACSEC_RXAN0IPUSS + i * 4, &reg);
1507 		stats->InPktsUnusedSA += reg;
1508 	}
1509 
1510 	nxp_c45_macsec_read64(phydev, MACSEC_INOD1HS,
1511 			      &stats->InOctetsDecrypted);
1512 	nxp_c45_macsec_read64(phydev, MACSEC_INOV1HS,
1513 			      &stats->InOctetsValidated);
1514 
1515 	nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPDS,
1516 				 &stats->InPktsDelayed);
1517 	nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPLS,
1518 				 &stats->InPktsLate);
1519 	nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPUS,
1520 				 &stats->InPktsUnchecked);
1521 
1522 	return 0;
1523 }
1524 
1525 static int nxp_c45_mdo_get_rx_sa_stats(struct macsec_context *ctx)
1526 {
1527 	struct phy_device *phydev = ctx->phydev;
1528 	struct nxp_c45_phy *priv = phydev->priv;
1529 	struct macsec_rx_sa_stats *stats;
1530 	struct nxp_c45_secy *phy_secy;
1531 	u8 an = ctx->sa.assoc_num;
1532 	struct nxp_c45_sa *sa;
1533 
1534 	phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
1535 	if (IS_ERR(phy_secy))
1536 		return PTR_ERR(phy_secy);
1537 
1538 	sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an);
1539 	if (IS_ERR(sa))
1540 		return PTR_ERR(sa);
1541 
1542 	stats = ctx->stats.rx_sa_stats;
1543 	nxp_c45_select_secy(phydev, phy_secy->secy_id);
1544 
1545 	nxp_c45_rx_sa_read_stats(phydev, sa, stats);
1546 	nxp_c45_macsec_read(phydev, MACSEC_RXAN0INUSS + an * 4,
1547 			    &stats->InPktsNotUsingSA);
1548 	nxp_c45_macsec_read(phydev, MACSEC_RXAN0IPUSS + an * 4,
1549 			    &stats->InPktsUnusedSA);
1550 
1551 	return 0;
1552 }
1553 
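/* Frames handed to the PHY are prefixed with a TJA11xx-specific TLV header:
 * the original DA/SA are kept, the EtherType is replaced by
 * ETH_P_TJA11XX_TLV followed by a subtype, a length and a zeroed 28-byte
 * payload, and the original EtherType and payload follow it. The frame grows
 * by the 32 bytes of needed headroom; the tag is presumably consumed by the
 * PHY on transmit.
 */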
1554 struct tja11xx_tlv_header {
1555 	struct ethhdr eth;
1556 	u8 subtype;
1557 	u8 len;
1558 	u8 payload[28];
1559 };
1560 
1561 static int nxp_c45_mdo_insert_tx_tag(struct phy_device *phydev,
1562 				     struct sk_buff *skb)
1563 {
1564 	struct tja11xx_tlv_header *tlv;
1565 	struct ethhdr *eth;
1566 
1567 	eth = eth_hdr(skb);
1568 	tlv = skb_push(skb, TJA11XX_TLV_TX_NEEDED_HEADROOM);
1569 	memmove(tlv, eth, sizeof(*eth));
1570 	skb_reset_mac_header(skb);
1571 	tlv->eth.h_proto = htons(ETH_P_TJA11XX_TLV);
1572 	tlv->subtype = 1;
1573 	tlv->len = sizeof(tlv->payload);
1574 	memset(tlv->payload, 0, sizeof(tlv->payload));
1575 
1576 	return 0;
1577 }
1578 
1579 static const struct macsec_ops nxp_c45_macsec_ops = {
1580 	.mdo_dev_open = nxp_c45_mdo_dev_open,
1581 	.mdo_dev_stop = nxp_c45_mdo_dev_stop,
1582 	.mdo_add_secy = nxp_c45_mdo_add_secy,
1583 	.mdo_upd_secy = nxp_c45_mdo_upd_secy,
1584 	.mdo_del_secy = nxp_c45_mdo_del_secy,
1585 	.mdo_add_rxsc = nxp_c45_mdo_add_rxsc,
1586 	.mdo_upd_rxsc = nxp_c45_mdo_upd_rxsc,
1587 	.mdo_del_rxsc = nxp_c45_mdo_del_rxsc,
1588 	.mdo_add_rxsa = nxp_c45_mdo_add_rxsa,
1589 	.mdo_upd_rxsa = nxp_c45_mdo_upd_rxsa,
1590 	.mdo_del_rxsa = nxp_c45_mdo_del_rxsa,
1591 	.mdo_add_txsa = nxp_c45_mdo_add_txsa,
1592 	.mdo_upd_txsa = nxp_c45_mdo_upd_txsa,
1593 	.mdo_del_txsa = nxp_c45_mdo_del_txsa,
1594 	.mdo_get_dev_stats = nxp_c45_mdo_get_dev_stats,
1595 	.mdo_get_tx_sc_stats = nxp_c45_mdo_get_tx_sc_stats,
1596 	.mdo_get_tx_sa_stats = nxp_c45_mdo_get_tx_sa_stats,
1597 	.mdo_get_rx_sc_stats = nxp_c45_mdo_get_rx_sc_stats,
1598 	.mdo_get_rx_sa_stats = nxp_c45_mdo_get_rx_sa_stats,
1599 	.mdo_insert_tx_tag = nxp_c45_mdo_insert_tx_tag,
1600 	.needed_headroom = TJA11XX_TLV_TX_NEEDED_HEADROOM,
1601 	.needed_tailroom = TJA11XX_TLV_NEEDED_TAILROOM,
1602 };
1603 
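/* Enable the MACsec block and the adapter in front of it, configure TX tag
 * handling, set the PN wrap threshold that triggers the pn_wrapped event,
 * and set up the MKA filter on EtherType ETH_P_PAE (presumably so that
 * key-agreement frames pass through unprotected).
 */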
1604 int nxp_c45_macsec_config_init(struct phy_device *phydev)
1605 {
1606 	struct nxp_c45_phy *priv = phydev->priv;
1607 	int ret;
1608 
1609 	if (!priv->macsec)
1610 		return 0;
1611 
1612 	ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
1613 			       MACSEC_EN | ADAPTER_EN);
1614 	if (ret)
1615 		return ret;
1616 
1617 	ret = nxp_c45_macsec_write(phydev, ADPTR_CNTRL, ADPTR_CNTRL_CONFIG_EN |
1618 				   ADPTR_CNTRL_ADPTR_EN);
1619 	if (ret)
1620 		return ret;
1621 
1622 	ret = nxp_c45_macsec_write(phydev, ADPTR_TX_TAG_CNTRL,
1623 				   ADPTR_TX_TAG_CNTRL_ENA);
1624 	if (ret)
1625 		return ret;
1626 
1627 	ret = nxp_c45_macsec_write(phydev, ADPTR_CNTRL, ADPTR_CNTRL_ADPTR_EN);
1628 	if (ret)
1629 		return ret;
1630 
1631 	ret = nxp_c45_macsec_write(phydev, MACSEC_TPNET, PN_WRAP_THRESHOLD);
1632 	if (ret)
1633 		return ret;
1634 
1635 	/* Set MKA filter. */
1636 	ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0D2, ETH_P_PAE);
1637 	if (ret)
1638 		return ret;
1639 
1640 	ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0M1, MACSEC_OVP);
1641 	if (ret)
1642 		return ret;
1643 
1644 	ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0M2, ETYPE_MASK);
1645 	if (ret)
1646 		return ret;
1647 
1648 	ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0R, MACSEC_UPFR_EN);
1649 
1650 	return ret;
1651 }
1652 
1653 int nxp_c45_macsec_probe(struct phy_device *phydev)
1654 {
1655 	struct nxp_c45_phy *priv = phydev->priv;
1656 	struct device *dev = &phydev->mdio.dev;
1657 
1658 	priv->macsec = devm_kzalloc(dev, sizeof(*priv->macsec), GFP_KERNEL);
1659 	if (!priv->macsec)
1660 		return -ENOMEM;
1661 
1662 	INIT_LIST_HEAD(&priv->macsec->secy_list);
1663 	phydev->macsec_ops = &nxp_c45_macsec_ops;
1664 
1665 	return 0;
1666 }
1667 
1668 void nxp_c45_macsec_remove(struct phy_device *phydev)
1669 {
1670 	struct nxp_c45_phy *priv = phydev->priv;
1671 	struct nxp_c45_secy *secy_p, *secy_t;
1672 	struct nxp_c45_sa *sa_p, *sa_t;
1673 	struct list_head *secy_list;
1674 
1675 	if (!priv->macsec)
1676 		return;
1677 
1678 	secy_list = &priv->macsec->secy_list;
1679 	nxp_c45_macsec_en(phydev, false);
1680 
1681 	list_for_each_entry_safe(secy_p, secy_t, secy_list, list) {
1682 		list_for_each_entry_safe(sa_p, sa_t, &secy_p->sa_list, list)
1683 			nxp_c45_sa_free(sa_p);
1684 		nxp_c45_secy_free(secy_p);
1685 	}
1686 }
1687 
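/* PN-wrapped event handler: for every SecY flagged in MACSEC_EVR, look up
 * the encoding SA and notify the MACsec core through macsec_pn_wrapped(),
 * then acknowledge the event.
 */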
1688 void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
1689 				     irqreturn_t *ret)
1690 {
1691 	struct nxp_c45_phy *priv = phydev->priv;
1692 	struct nxp_c45_secy *secy;
1693 	struct nxp_c45_sa *sa;
1694 	u8 encoding_sa;
1695 	int secy_id;
1696 	u32 reg = 0;
1697 
1698 	if (!priv->macsec)
1699 		return;
1700 
1701 	do {
1702 		nxp_c45_macsec_read(phydev, MACSEC_EVR, &reg);
1703 		if (!reg)
1704 			return;
1705 
1706 		secy_id = MACSEC_REG_SIZE - ffs(reg);
1707 		secy = nxp_c45_find_secy_by_id(&priv->macsec->secy_list,
1708 					       secy_id);
1709 		if (IS_ERR(secy)) {
1710 			WARN_ON(1);
1711 			goto macsec_ack_irq;
1712 		}
1713 
1714 		encoding_sa = secy->secy->tx_sc.encoding_sa;
1715 		phydev_dbg(phydev, "pn_wrapped: TX SC %d, encoding_sa %u\n",
1716 			   secy->secy_id, encoding_sa);
1717 
1718 		sa = nxp_c45_find_sa(&secy->sa_list, TX_SA, encoding_sa);
1719 		if (!IS_ERR(sa))
1720 			macsec_pn_wrapped(secy->secy, sa->sa);
1721 		else
1722 			WARN_ON(1);
1723 
1724 macsec_ack_irq:
1725 		nxp_c45_macsec_write(phydev, MACSEC_EVR,
1726 				     TX_SC_BIT(secy_id));
1727 		*ret = IRQ_HANDLED;
1728 	} while (reg);
1729 }
1730