xref: /linux/drivers/net/phy/nxp-c45-tja11xx.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3  * Copyright 2021-2025 NXP
4  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/of.h>
14 #include <linux/phy.h>
15 #include <linux/processor.h>
16 #include <linux/property.h>
17 #include <linux/ptp_classify.h>
18 #include <linux/net_tstamp.h>
19 
20 #include "nxp-c45-tja11xx.h"
21 
22 /* Same id: TJA1103, TJA1104 */
23 #define PHY_ID_TJA_1103			0x001BB010
24 /* Same id: TJA1120, TJA1121 */
25 #define PHY_ID_TJA_1120			0x001BB031
26 
27 #define VEND1_DEVICE_ID3		0x0004
28 #define TJA1120_DEV_ID3_SILICON_VERSION	GENMASK(15, 12)
29 #define TJA1120_DEV_ID3_SAMPLE_TYPE	GENMASK(11, 8)
30 #define DEVICE_ID3_SAMPLE_TYPE_R	0x9
31 
32 #define VEND1_DEVICE_CONTROL		0x0040
33 #define DEVICE_CONTROL_RESET		BIT(15)
34 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
35 #define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)
36 
37 #define VEND1_DEVICE_CONFIG		0x0048
38 
39 #define TJA1120_VEND1_EXT_TS_MODE	0x1012
40 
41 #define TJA1120_GLOBAL_INFRA_IRQ_ACK	0x2C08
42 #define TJA1120_GLOBAL_INFRA_IRQ_EN	0x2C0A
43 #define TJA1120_GLOBAL_INFRA_IRQ_STATUS	0x2C0C
44 #define TJA1120_DEV_BOOT_DONE		BIT(1)
45 
46 #define TJA1120_VEND1_PTP_TRIG_DATA_S	0x1070
47 
48 #define TJA1120_EGRESS_TS_DATA_S	0x9060
49 #define TJA1120_EGRESS_TS_END		0x9067
50 #define TJA1120_TS_VALID		BIT(0)
51 #define TJA1120_MORE_TS			BIT(15)
52 
53 #define VEND1_PHY_IRQ_ACK		0x80A0
54 #define VEND1_PHY_IRQ_EN		0x80A1
55 #define VEND1_PHY_IRQ_STATUS		0x80A2
56 #define PHY_IRQ_LINK_EVENT		BIT(1)
57 
58 #define VEND1_ALWAYS_ACCESSIBLE		0x801F
59 #define FUSA_PASS			BIT(4)
60 
61 #define VEND1_PHY_CONTROL		0x8100
62 #define PHY_CONFIG_EN			BIT(14)
63 #define PHY_START_OP			BIT(0)
64 
65 #define VEND1_PHY_CONFIG		0x8108
66 #define PHY_CONFIG_AUTO			BIT(0)
67 
68 #define TJA1120_EPHY_RESETS		0x810A
69 #define EPHY_PCS_RESET			BIT(3)
70 
71 #define VEND1_SIGNAL_QUALITY		0x8320
72 #define SQI_VALID			BIT(14)
73 #define SQI_MASK			GENMASK(2, 0)
74 #define MAX_SQI				SQI_MASK
75 
76 #define CABLE_TEST_ENABLE		BIT(15)
77 #define CABLE_TEST_START		BIT(14)
78 #define CABLE_TEST_OK			0x00
79 #define CABLE_TEST_SHORTED		0x01
80 #define CABLE_TEST_OPEN			0x02
81 #define CABLE_TEST_UNKNOWN		0x07
82 
83 #define VEND1_PORT_CONTROL		0x8040
84 #define PORT_CONTROL_EN			BIT(14)
85 
86 #define VEND1_PORT_ABILITIES		0x8046
87 #define MACSEC_ABILITY			BIT(5)
88 #define PTP_ABILITY			BIT(3)
89 
90 #define VEND1_PORT_FUNC_IRQ_EN		0x807A
91 #define MACSEC_IRQS			BIT(5)
92 #define PTP_IRQS			BIT(3)
93 
94 #define VEND1_PTP_IRQ_ACK		0x9008
95 #define EGR_TS_IRQ			BIT(1)
96 
97 #define VEND1_PORT_INFRA_CONTROL	0xAC00
98 #define PORT_INFRA_CONTROL_EN		BIT(14)
99 
100 #define VEND1_RXID			0xAFCC
101 #define VEND1_TXID			0xAFCD
102 #define ID_ENABLE			BIT(15)
103 
104 #define VEND1_ABILITIES			0xAFC4
105 #define RGMII_ID_ABILITY		BIT(15)
106 #define RGMII_ABILITY			BIT(14)
107 #define RMII_ABILITY			BIT(10)
108 #define REVMII_ABILITY			BIT(9)
109 #define MII_ABILITY			BIT(8)
110 #define SGMII_ABILITY			BIT(0)
111 
112 #define VEND1_MII_BASIC_CONFIG		0xAFC6
113 #define MII_BASIC_CONFIG_REV		BIT(4)
114 #define MII_BASIC_CONFIG_SGMII		0x9
115 #define MII_BASIC_CONFIG_RGMII		0x7
116 #define MII_BASIC_CONFIG_RMII		0x5
117 #define MII_BASIC_CONFIG_MII		0x4
118 
119 #define VEND1_SGMII_BASIC_CONTROL	0xB000
120 #define SGMII_LPM			BIT(11)
121 
122 #define VEND1_SYMBOL_ERROR_CNT_XTD	0x8351
123 #define EXTENDED_CNT_EN			BIT(15)
124 #define VEND1_MONITOR_STATUS		0xAC80
125 #define MONITOR_RESET			BIT(15)
126 #define VEND1_MONITOR_CONFIG		0xAC86
127 #define LOST_FRAMES_CNT_EN		BIT(9)
128 #define ALL_FRAMES_CNT_EN		BIT(8)
129 
130 #define VEND1_SYMBOL_ERROR_COUNTER	0x8350
131 #define VEND1_LINK_DROP_COUNTER		0x8352
132 #define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
133 #define VEND1_RX_PREAMBLE_COUNT		0xAFCE
134 #define VEND1_TX_PREAMBLE_COUNT		0xAFCF
135 #define VEND1_RX_IPG_LENGTH		0xAFD0
136 #define VEND1_TX_IPG_LENGTH		0xAFD1
137 #define COUNTER_EN			BIT(15)
138 
139 #define VEND1_PTP_CONFIG		0x1102
140 #define EXT_TRG_EDGE			BIT(1)
141 
142 #define TJA1120_SYNC_TRIG_FILTER	0x1010
143 #define PTP_TRIG_RISE_TS		BIT(3)
144 #define PTP_TRIG_FALLING_TS		BIT(2)
145 
146 #define CLK_RATE_ADJ_LD			BIT(15)
147 #define CLK_RATE_ADJ_DIR		BIT(14)
148 
149 #define VEND1_RX_TS_INSRT_CTRL		0x114D
150 #define TJA1103_RX_TS_INSRT_MODE2	0x02
151 
152 #define TJA1120_RX_TS_INSRT_CTRL	0x9012
153 #define TJA1120_RX_TS_INSRT_EN		BIT(15)
154 #define TJA1120_TS_INSRT_MODE		BIT(4)
155 
156 #define VEND1_EGR_RING_DATA_0		0x114E
157 #define VEND1_EGR_RING_CTRL		0x1154
158 
159 #define RING_DATA_0_TS_VALID		BIT(15)
160 
161 #define RING_DONE			BIT(0)
162 
163 #define TS_SEC_MASK			GENMASK(1, 0)
164 
165 #define PTP_ENABLE			BIT(3)
166 #define PHY_TEST_ENABLE			BIT(0)
167 
168 #define VEND1_PORT_PTP_CONTROL		0x9000
169 #define PORT_PTP_CONTROL_BYPASS		BIT(11)
170 
171 #define PTP_CLK_PERIOD_100BT1		15ULL
172 #define PTP_CLK_PERIOD_1000BT1		8ULL
173 
174 #define EVENT_MSG_FILT_ALL		0x0F
175 #define EVENT_MSG_FILT_NONE		0x00
176 
177 #define VEND1_GPIO_FUNC_CONFIG_BASE	0x2C40
178 #define GPIO_FUNC_EN			BIT(15)
179 #define GPIO_FUNC_PTP			BIT(6)
180 #define GPIO_SIGNAL_PTP_TRIGGER		0x01
181 #define GPIO_SIGNAL_PPS_OUT		0x12
182 #define GPIO_DISABLE			0
183 #define GPIO_PPS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
184 	GPIO_SIGNAL_PPS_OUT)
185 #define GPIO_EXTTS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
186 	GPIO_SIGNAL_PTP_TRIGGER)
187 
188 #define RGMII_PERIOD_PS			8000U
189 #define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
190 #define MIN_ID_PS			1644U
191 #define MAX_ID_PS			2260U
192 #define DEFAULT_ID_PS			2000U
193 
194 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
195 	(ppb) * (ptp_clk_period), NSEC_PER_SEC)
196 
197 #define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)
198 
199 #define TJA11XX_REVERSE_MODE		BIT(0)
200 
201 struct nxp_c45_phy;
202 
/* Per-skb state kept in skb->cb while a PTP frame waits for its
 * hardware timestamp.
 */
struct nxp_c45_skb_cb {
	/* Parsed PTP header inside the frame; used to match HW timestamps. */
	struct ptp_header *header;
	/* Packet classification type as passed to the timestamp callbacks. */
	unsigned int type;
};
207 
/* Build a struct nxp_c45_reg_field compound literal describing a field
 * of @_size bits starting at bit @_offset in register @_reg of MMD
 * @_devad.
 */
#define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size)	\
	((struct nxp_c45_reg_field) {			\
		.reg = _reg,				\
		.devad =  _devad,			\
		.offset = _offset,			\
		.size = _size,				\
	})
215 
/* Location of a bit-field within an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* LSB position of the field */
	u8 size;	/* width in bits; 1 means a single flag bit */
};
222 
/* Hardware timestamp read back from the PHY's timestamp registers,
 * together with the PTP header fields used to match it to an skb.
 */
struct nxp_c45_hwts {
	u32	nsec;		/* nanoseconds part */
	u32	sec;		/* partial seconds counter (HW provides only a few low bits) */
	u8	domain_number;	/* PTP domainNumber of the frame */
	u16	sequence_id;	/* PTP sequenceId of the frame */
	u8	msg_type;	/* PTP messageType of the frame */
};
230 
/* Chip-specific register layout: addresses and bit-field locations that
 * differ between the TJA1103 and TJA1120 families.
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC (local time counter) bits and regs. */
	struct nxp_c45_reg_field ltc_read;
	struct nxp_c45_reg_field ltc_write;
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* Egress timestamp reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;
	struct nxp_c45_reg_field sec_4_2;
	struct nxp_c45_reg_field nsec_15_0;
	struct nxp_c45_reg_field nsec_29_16;

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
276 
/* Name and register field of one vendor statistics counter. */
struct nxp_c45_phy_stats {
	const char	*name;
	const struct nxp_c45_reg_field counter;
};
281 
/* Per-model driver data, reached through the phy_driver match entry. */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;	/* chip register layout */
	const struct nxp_c45_phy_stats *stats;	/* vendor counter table */
	int n_stats;				/* number of entries in @stats */
	u8 ptp_clk_period;			/* PTP clock period in ns */
	bool ext_ts_both_edges;		/* HW can timestamp rising and falling edges */
	bool ack_ptp_irq;	/* NOTE(review): PTP IRQ ack flag - consumer is outside this chunk */
	void (*counters_enable)(struct phy_device *phydev);
	/* Fetch one egress HW timestamp; returns false when none is ready. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	/* Fetch the external trigger timestamp; returns validity. */
	bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
	void (*nmi_handler)(struct phy_device *phydev,
			    irqreturn_t *irq_status);
};
298 
299 static const
300 struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
301 {
302 	return phydev->drv->driver_data;
303 }
304 
305 static const
306 struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
307 {
308 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
309 
310 	return phy_data->regmap;
311 }
312 
313 static int nxp_c45_read_reg_field(struct phy_device *phydev,
314 				  const struct nxp_c45_reg_field *reg_field)
315 {
316 	u16 mask;
317 	int ret;
318 
319 	if (reg_field->size == 0) {
320 		phydev_err(phydev, "Trying to read a reg field of size 0.\n");
321 		return -EINVAL;
322 	}
323 
324 	ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
325 	if (ret < 0)
326 		return ret;
327 
328 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
329 		GENMASK(reg_field->offset + reg_field->size - 1,
330 			reg_field->offset);
331 	ret &= mask;
332 	ret >>= reg_field->offset;
333 
334 	return ret;
335 }
336 
337 static int nxp_c45_write_reg_field(struct phy_device *phydev,
338 				   const struct nxp_c45_reg_field *reg_field,
339 				   u16 val)
340 {
341 	u16 mask;
342 	u16 set;
343 
344 	if (reg_field->size == 0) {
345 		phydev_err(phydev, "Trying to write a reg field of size 0.\n");
346 		return -EINVAL;
347 	}
348 
349 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
350 		GENMASK(reg_field->offset + reg_field->size - 1,
351 			reg_field->offset);
352 	set = val << reg_field->offset;
353 
354 	return phy_modify_mmd_changed(phydev, reg_field->devad,
355 				      reg_field->reg, mask, set);
356 }
357 
/* Set a single-bit register field to 1; wider fields are rejected. */
static int nxp_c45_set_reg_field(struct phy_device *phydev,
				 const struct nxp_c45_reg_field *reg_field)
{
	if (reg_field->size != 1) {
		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
		return -EINVAL;
	}

	return nxp_c45_write_reg_field(phydev, reg_field, 1);
}
368 
369 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
370 				   const struct nxp_c45_reg_field *reg_field)
371 {
372 	if (reg_field->size != 1) {
373 		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
374 		return -EINVAL;
375 	}
376 
377 	return nxp_c45_write_reg_field(phydev, reg_field, 0);
378 }
379 
380 static bool nxp_c45_poll_txts(struct phy_device *phydev)
381 {
382 	return phydev->irq <= 0;
383 }
384 
/* Read the local time counter (LTC). Caller must hold priv->ptp_lock.
 * NOTE(review): the @sts system-timestamp argument is accepted but not
 * filled in - confirm whether PHC snapshotting should be supported.
 */
static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	/* Latch the LTC into the read registers, then assemble the 32-bit
	 * nsec/sec values from their 16-bit halves.
	 */
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_nsec_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    regmap->vend1_ltc_rd_nsec_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  regmap->vend1_ltc_rd_sec_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_sec_1) << 16;

	return 0;
}
404 
/* Locked wrapper around _nxp_c45_ptp_gettimex64(). */
static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
417 
/* Program the LTC to @ts. Caller must hold priv->ptp_lock.
 * All four 16-bit halves are written first; asserting ltc_write then
 * loads the new value into the counter.
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
		      ts->tv_sec >> 16);
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);

	return 0;
}
436 
/* Locked wrapper around _nxp_c45_ptp_settime64(). */
static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_settime64(ptp, ts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
448 
/* Adjust the LTC frequency by @scaled_ppm. The magnitude is converted
 * to a sub-ns increment per PTP clock cycle; the direction bit selects
 * speeding up vs slowing down the clock.
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	/* Low 16 bits first; the high-half write carries the load bit
	 * (CLK_RATE_ADJ_LD) and the direction bit.
	 */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
479 
480 static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
481 {
482 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
483 	struct timespec64 now, then;
484 
485 	mutex_lock(&priv->ptp_lock);
486 	then = ns_to_timespec64(delta);
487 	_nxp_c45_ptp_gettimex64(ptp, &now, NULL);
488 	now = timespec64_add(now, then);
489 	_nxp_c45_ptp_settime64(ptp, &now);
490 	mutex_unlock(&priv->ptp_lock);
491 
492 	return 0;
493 }
494 
495 static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
496 				   struct nxp_c45_hwts *hwts)
497 {
498 	ts->tv_nsec = hwts->nsec;
499 	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
500 		ts->tv_sec -= TS_SEC_MASK + 1;
501 	ts->tv_sec &= ~TS_SEC_MASK;
502 	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
503 }
504 
505 static bool nxp_c45_match_ts(struct ptp_header *header,
506 			     struct nxp_c45_hwts *hwts,
507 			     unsigned int type)
508 {
509 	return ntohs(header->sequence_id) == hwts->sequence_id &&
510 	       ptp_get_msgtype(header, type) == hwts->msg_type &&
511 	       header->domain_number  == hwts->domain_number;
512 }
513 
/* Read the latched external trigger timestamp from the four 16-bit
 * data registers, then acknowledge with RING_DONE to release the slot.
 * Always reports the timestamp as valid.
 */
static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       regmap->vend1_ext_trg_data_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_3) << 16;
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_ext_trg_ctrl, RING_DONE);

	return true;
}
532 
533 static bool tja1120_extts_is_valid(struct phy_device *phydev)
534 {
535 	bool valid;
536 	int reg;
537 
538 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
539 			   TJA1120_VEND1_PTP_TRIG_DATA_S);
540 	valid = !!(reg & TJA1120_TS_VALID);
541 
542 	return valid;
543 }
544 
/* TJA1120 variant of get_extts: only read the timestamp when TS_VALID
 * is set, working around engineering samples that need an explicit
 * FIFO pop first. Returns true when a valid timestamp was read.
 */
static bool tja1120_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   regmap->vend1_ext_trg_ctrl);
	more_ts = !!(reg & TJA1120_MORE_TS);

	valid = tja1120_extts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_extts_out;

		/* Bug workaround for TJA1120 engineering samples: move the new
		 * timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      regmap->vend1_ext_trg_ctrl, RING_DONE);
		valid = tja1120_extts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_extts_out;
	}

	nxp_c45_get_extts(priv, extts);
tja1120_get_extts_out:
	return valid;
}
577 
/* Assemble one egress HW timestamp from the per-field egress TS
 * registers: PTP matching fields plus a 30-bit nsec and a 5-bit sec
 * value split across two fields each.
 */
static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
				   struct nxp_c45_hwts *hwts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;

	hwts->domain_number =
		nxp_c45_read_reg_field(phydev, &regmap->domain_number);
	hwts->msg_type =
		nxp_c45_read_reg_field(phydev, &regmap->msg_type);
	hwts->sequence_id =
		nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
	hwts->nsec =
		nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
	hwts->nsec |=
		nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
	hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
	hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;
}
597 
/* Pop one egress timestamp from the egress ring (TJA1103 flavour).
 * RING_DONE advances the ring; the DATA_0 valid bit tells whether a
 * timestamp is present. Returns false when the ring is empty.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	nxp_c45_read_egress_ts(priv, hwts);
nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
617 
618 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
619 {
620 	bool valid;
621 	u16 reg;
622 
623 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
624 	valid = !!(reg & TJA1120_TS_VALID);
625 
626 	return valid;
627 }
628 
/* Pop one egress timestamp (TJA1120 flavour): read only when TS_VALID
 * is set, applying the engineering-sample FIFO workaround, and clear
 * TS_VALID afterwards to release the slot. Returns false when no
 * timestamp is available.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
661 
/* Match one egress HW timestamp against the queued tx skbs and, on a
 * match, reconstruct the full time and complete the skb's tx
 * timestamp. The queue lock is held only while searching/unlinking.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* Current LTC time supplies the high seconds bits missing
		 * from the partial HW timestamp.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
696 
/* PTP auxiliary worker: drain egress timestamps (when polling, i.e. no
 * IRQ), timestamp all queued rx skbs, and poll the external trigger.
 * Returns a positive delay to be rescheduled, negative to stop.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool ts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		ts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!ts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		/* The rx timestamp (2 sec bits + 30 nsec bits) was stashed
		 * in the PTP header's reserved2 field by the hardware.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		/* Only report a new event when the timestamp changed. */
		ts_valid = data->get_extts(priv, &ts);
		if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
748 
749 static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
750 				int pin, u16 pin_cfg)
751 {
752 	struct phy_device *phydev = priv->phydev;
753 
754 	phy_write_mmd(phydev, MDIO_MMD_VEND1,
755 		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
756 }
757 
/* Enable/disable the periodic output (PPS) on the pin assigned to
 * @perout->index. Only a 1 s period is supported; an optional phase of
 * half a second is implemented by inverting the PPS polarity.
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	int pin;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_enable);
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_polarity);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		/* A 500 ms phase is realized by inverting the polarity. */
		if (perout->phase.nsec == 0)
			nxp_c45_clear_reg_field(priv->phydev,
						&regmap->pps_polarity);
		else
			nxp_c45_set_reg_field(priv->phydev,
					      &regmap->pps_polarity);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);

	return 0;
}
815 
/* Select the external trigger edge on chips that can timestamp only
 * one edge: EXT_TRG_EDGE clear = rising, set = falling. If both flags
 * are given, the later falling-edge write wins.
 */
static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
					  struct ptp_extts_request *extts)
{
	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
}
827 
/* Configure the TJA1120 trigger filter, which has independent enables
 * for rising- and falling-edge timestamps.
 */
static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
					   struct ptp_extts_request *extts)
{
	/* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
	 * this case external ts will be enabled on rising edge.
	 */
	if (extts->flags & PTP_RISING_EDGE ||
	    extts->flags == PTP_ENABLE_FEATURE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_SYNC_TRIG_FILTER,
				 PTP_TRIG_RISE_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_SYNC_TRIG_FILTER,
				   PTP_TRIG_RISE_TS);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_SYNC_TRIG_FILTER,
				 PTP_TRIG_FALLING_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_SYNC_TRIG_FILTER,
				   PTP_TRIG_FALLING_TS);
}
853 
/* Enable/disable external timestamping on the pin assigned to
 * @extts->index, then kick the aux worker which polls for events.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	int pin;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE) &&
	    !data->ext_ts_both_edges)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	if (data->ext_ts_both_edges)
		nxp_c45_set_rising_and_falling(priv->phydev, extts);
	else
		nxp_c45_set_rising_or_falling(priv->phydev, extts);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* The worker polls for captured trigger timestamps. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
889 
890 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
891 			      struct ptp_clock_request *req, int on)
892 {
893 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
894 
895 	switch (req->type) {
896 	case PTP_CLK_REQ_EXTTS:
897 		return nxp_c45_extts_enable(priv, &req->extts, on);
898 	case PTP_CLK_REQ_PEROUT:
899 		return nxp_c45_perout_enable(priv, &req->perout, on);
900 	default:
901 		return -EOPNOTSUPP;
902 	}
903 }
904 
/* The 12 configurable GPIO pins, all unassigned (PTP_PF_NONE) until
 * user space picks a function via the PTP pin API.
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
919 
920 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
921 				  enum ptp_pin_function func, unsigned int chan)
922 {
923 	if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
924 		return -EINVAL;
925 
926 	switch (func) {
927 	case PTP_PF_NONE:
928 	case PTP_PF_PEROUT:
929 	case PTP_PF_EXTTS:
930 		break;
931 	default:
932 		return -EOPNOTSUPP;
933 	}
934 
935 	return 0;
936 }
937 
/* Fill in the PHC capabilities and register the PTP clock. Returns 0,
 * a registration error, or -ENOMEM when PHC support is disabled
 * (ptp_clock_register() returned NULL).
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
		.supported_extts_flags = PTP_RISING_EDGE |
					 PTP_FALLING_EDGE |
					 PTP_STRICT_FLAGS,
		.supported_perout_flags = PTP_PEROUT_PHASE,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
972 
/* mii_timestamper::txtstamp - queue an outgoing PTP frame for HW
 * timestamping, or drop the clone when tx timestamping is off.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		/* Without a PTP IRQ, the aux worker must poll for the
		 * egress timestamp.
		 */
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
994 
/* mii_timestamper::rxtstamp - defer delivery of a PTP frame to the aux
 * worker, which attaches the rx HW timestamp. Returns false (frame not
 * taken) for non-PTP frames or when rx timestamping is off.
 */
static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct ptp_header *header = ptp_parse_header(skb, type);

	if (!header)
		return false;

	if (!priv->hwts_rx)
		return false;

	NXP_C45_SKB_CB(skb)->header = header;
	skb_queue_tail(&priv->rx_queue, skb);
	ptp_schedule_worker(priv->ptp_clock, 0);

	return true;
}
1014 
1015 static int nxp_c45_hwtstamp_get(struct mii_timestamper *mii_ts,
1016 				struct kernel_hwtstamp_config *cfg)
1017 {
1018 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1019 						mii_ts);
1020 
1021 	cfg->tx_type = priv->hwts_tx;
1022 	cfg->rx_filter = priv->hwts_rx ? HWTSTAMP_FILTER_PTP_V2_L2_EVENT
1023 				       : HWTSTAMP_FILTER_NONE;
1024 
1025 	return 0;
1026 }
1027 
1028 static int nxp_c45_hwtstamp_set(struct mii_timestamper *mii_ts,
1029 				struct kernel_hwtstamp_config *cfg,
1030 				struct netlink_ext_ack *extack)
1031 {
1032 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1033 						mii_ts);
1034 	struct phy_device *phydev = priv->phydev;
1035 	const struct nxp_c45_phy_data *data;
1036 
1037 	if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
1038 		return -ERANGE;
1039 
1040 	data = nxp_c45_get_data(phydev);
1041 	priv->hwts_tx = cfg->tx_type;
1042 
1043 	switch (cfg->rx_filter) {
1044 	case HWTSTAMP_FILTER_NONE:
1045 		priv->hwts_rx = 0;
1046 		break;
1047 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1048 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1049 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1050 		priv->hwts_rx = 1;
1051 		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1052 		break;
1053 	default:
1054 		return -ERANGE;
1055 	}
1056 
1057 	if (priv->hwts_rx || priv->hwts_tx) {
1058 		phy_write_mmd(phydev, MDIO_MMD_VEND1,
1059 			      data->regmap->vend1_event_msg_filt,
1060 			      EVENT_MSG_FILT_ALL);
1061 		data->ptp_enable(phydev, true);
1062 	} else {
1063 		phy_write_mmd(phydev, MDIO_MMD_VEND1,
1064 			      data->regmap->vend1_event_msg_filt,
1065 			      EVENT_MSG_FILT_NONE);
1066 		data->ptp_enable(phydev, false);
1067 	}
1068 
1069 	if (nxp_c45_poll_txts(priv->phydev))
1070 		goto nxp_c45_no_ptp_irq;
1071 
1072 	if (priv->hwts_tx)
1073 		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
1074 	else
1075 		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);
1076 
1077 nxp_c45_no_ptp_irq:
1078 	return 0;
1079 }
1080 
1081 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
1082 			   struct kernel_ethtool_ts_info *ts_info)
1083 {
1084 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1085 						mii_ts);
1086 
1087 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1088 			SOF_TIMESTAMPING_RX_HARDWARE |
1089 			SOF_TIMESTAMPING_RAW_HARDWARE;
1090 	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1091 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1092 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1093 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1094 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1095 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
1096 
1097 	return 0;
1098 }
1099 
/* Link/symbol counters present on all supported PHY variants. */
static const struct nxp_c45_phy_stats common_hw_stats[] = {
	{ "phy_link_status_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
	{ "phy_link_availability_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
	{ "phy_link_loss_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
	{ "phy_link_failure_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
	{ "phy_symbol_error_cnt",
		NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
};
1112 
/* Counters specific to TJA1103/TJA1104. */
static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
	{ "rx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
	{ "tx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
	{ "rx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
	{ "tx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
};
1123 
/* Counters specific to TJA1120/TJA1121; the "_xtd" entries extend their
 * base counters with additional high-order bits.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
		NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
		NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
		NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
		NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
		NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1144 
1145 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1146 {
1147 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1148 
1149 	return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1150 }
1151 
1152 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
1153 {
1154 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1155 	size_t count = nxp_c45_get_sset_count(phydev);
1156 	size_t idx;
1157 	size_t i;
1158 
1159 	for (i = 0; i < count; i++) {
1160 		if (i < ARRAY_SIZE(common_hw_stats)) {
1161 			ethtool_puts(&data, common_hw_stats[i].name);
1162 			continue;
1163 		}
1164 		idx = i - ARRAY_SIZE(common_hw_stats);
1165 		ethtool_puts(&data, phy_data->stats[idx].name);
1166 	}
1167 }
1168 
1169 static void nxp_c45_get_stats(struct phy_device *phydev,
1170 			      struct ethtool_stats *stats, u64 *data)
1171 {
1172 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1173 	size_t count = nxp_c45_get_sset_count(phydev);
1174 	const struct nxp_c45_reg_field *reg_field;
1175 	size_t idx;
1176 	size_t i;
1177 	int ret;
1178 
1179 	for (i = 0; i < count; i++) {
1180 		if (i < ARRAY_SIZE(common_hw_stats)) {
1181 			reg_field = &common_hw_stats[i].counter;
1182 		} else {
1183 			idx = i - ARRAY_SIZE(common_hw_stats);
1184 			reg_field = &phy_data->stats[idx].counter;
1185 		}
1186 
1187 		ret = nxp_c45_read_reg_field(phydev, reg_field);
1188 		if (ret < 0)
1189 			data[i] = U64_MAX;
1190 		else
1191 			data[i] = ret;
1192 	}
1193 }
1194 
/* Open the device for configuration: enable global/all config access,
 * then enable the port, PHY and port-infra blocks. The delay between the
 * two steps is required before the follow-up writes take effect.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
1211 
/* Leave configuration mode and start normal PHY operation. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1217 
1218 static int nxp_c45_config_intr(struct phy_device *phydev)
1219 {
1220 	int ret;
1221 
1222 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1223 		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1224 				       VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1225 		if (ret)
1226 			return ret;
1227 
1228 		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1229 					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1230 	}
1231 
1232 	ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1233 				 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1234 	if (ret)
1235 		return ret;
1236 
1237 	return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1238 				  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1239 }
1240 
/* TJA1103 interrupt setup: clear any pending FUSA event before applying
 * the common IRQ configuration (the same write the NMI handler uses to
 * acknowledge the event).
 */
static int tja1103_config_intr(struct phy_device *phydev)
{
	int ret;

	/* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
			    FUSA_PASS);
	if (ret)
		return ret;

	return nxp_c45_config_intr(phydev);
}
1253 
1254 static int tja1120_config_intr(struct phy_device *phydev)
1255 {
1256 	int ret;
1257 
1258 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1259 		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1260 				       TJA1120_GLOBAL_INFRA_IRQ_EN,
1261 				       TJA1120_DEV_BOOT_DONE);
1262 	else
1263 		ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1264 					 TJA1120_GLOBAL_INFRA_IRQ_EN,
1265 					 TJA1120_DEV_BOOT_DONE);
1266 	if (ret)
1267 		return ret;
1268 
1269 	return nxp_c45_config_intr(phydev);
1270 }
1271 
/* Top-level IRQ handler: services link events, drains egress timestamps,
 * then lets the variant NMI handler and the MACsec block claim the IRQ.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	/* Link event: acknowledge and let phylib re-evaluate the link. */
	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	/* Egress timestamp IRQ: drain all pending TX timestamps. */
	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp.
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	data->nmi_handler(phydev, &ret);
	nxp_c45_handle_macsec_interrupt(phydev, &ret);

	return ret;
}
1308 
/* Trigger a device soft reset and poll until the self-clearing reset bit
 * goes low.
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	/* Give the device time to start the reset before polling. */
	usleep_range(2000, 2050);

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1325 
1326 static int nxp_c45_cable_test_start(struct phy_device *phydev)
1327 {
1328 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1329 
1330 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1331 			 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1332 	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1333 				CABLE_TEST_ENABLE | CABLE_TEST_START);
1334 }
1335 
/* Poll the cable test result. Returns with *finished = false while the
 * test is still running; once valid, reports the result to ethtool,
 * disables the test block and restarts normal PHY operation.
 */
static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
	int ret;
	u8 cable_test_result;

	ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
	if (!ret) {
		*finished = false;
		return 0;
	}

	*finished = true;
	cable_test_result = nxp_c45_read_reg_field(phydev,
						   &regmap->cable_test_result);

	/* Single-pair PHY: all results are reported on pair A. */
	switch (cable_test_result) {
	case CABLE_TEST_OK:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OK);
		break;
	case CABLE_TEST_SHORTED:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
		break;
	case CABLE_TEST_OPEN:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
		break;
	default:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
	}

	/* Tear down the test configuration in reverse order of setup. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
			   CABLE_TEST_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);

	return nxp_c45_start_op(phydev);
}
1378 
1379 static int nxp_c45_get_sqi(struct phy_device *phydev)
1380 {
1381 	int reg;
1382 
1383 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1384 	if (!(reg & SQI_VALID))
1385 		return -EINVAL;
1386 
1387 	reg &= SQI_MASK;
1388 
1389 	return reg;
1390 }
1391 
static void tja1120_link_change_notify(struct phy_device *phydev)
{
	/* Bug workaround for TJA1120 engineering samples: fix egress
	 * timestamps lost after link recovery by pulsing the PCS reset
	 * whenever the link goes down.
	 */
	if (phydev->state == PHY_NOLINK) {
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
	}
}
1404 
/* Highest SQI value the hardware can report. */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1409 
1410 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1411 {
1412 	if (delay < MIN_ID_PS) {
1413 		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1414 		return -EINVAL;
1415 	}
1416 
1417 	if (delay > MAX_ID_PS) {
1418 		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1419 		return -EINVAL;
1420 	}
1421 
1422 	return 0;
1423 }
1424 
/* Enable the link-drop counter shared by all variants, then let the
 * variant-specific hook enable its own counters.
 */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
1434 
/* Common PTP bring-up: program the clock period, unlock the local time
 * counter, then run the variant-specific PTP init.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1446 
1447 static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1448 {
1449 	/* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1450 	 * To avoid floating point operations we'll multiply by 10
1451 	 * and get 1 decimal point precision.
1452 	 */
1453 	phase_offset_raw *= 10;
1454 	phase_offset_raw -= 738;
1455 	return div_u64(phase_offset_raw, 9);
1456 }
1457 
1458 static void nxp_c45_disable_delays(struct phy_device *phydev)
1459 {
1460 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
1461 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
1462 }
1463 
1464 static void nxp_c45_set_delays(struct phy_device *phydev)
1465 {
1466 	struct nxp_c45_phy *priv = phydev->priv;
1467 	u64 tx_delay = priv->tx_delay;
1468 	u64 rx_delay = priv->rx_delay;
1469 	u64 degree;
1470 
1471 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1472 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1473 		degree = div_u64(tx_delay, PS_PER_DEGREE);
1474 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1475 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1476 	} else {
1477 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1478 				   ID_ENABLE);
1479 	}
1480 
1481 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1482 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1483 		degree = div_u64(rx_delay, PS_PER_DEGREE);
1484 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1485 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1486 	} else {
1487 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1488 				   ID_ENABLE);
1489 	}
1490 }
1491 
1492 static int nxp_c45_get_delays(struct phy_device *phydev)
1493 {
1494 	struct nxp_c45_phy *priv = phydev->priv;
1495 	int ret;
1496 
1497 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1498 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1499 		ret = device_property_read_u32(&phydev->mdio.dev,
1500 					       "tx-internal-delay-ps",
1501 					       &priv->tx_delay);
1502 		if (ret)
1503 			priv->tx_delay = DEFAULT_ID_PS;
1504 
1505 		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1506 		if (ret) {
1507 			phydev_err(phydev,
1508 				   "tx-internal-delay-ps invalid value\n");
1509 			return ret;
1510 		}
1511 	}
1512 
1513 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1514 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1515 		ret = device_property_read_u32(&phydev->mdio.dev,
1516 					       "rx-internal-delay-ps",
1517 					       &priv->rx_delay);
1518 		if (ret)
1519 			priv->rx_delay = DEFAULT_ID_PS;
1520 
1521 		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1522 		if (ret) {
1523 			phydev_err(phydev,
1524 				   "rx-internal-delay-ps invalid value\n");
1525 			return ret;
1526 		}
1527 	}
1528 
1529 	return 0;
1530 }
1531 
/* Program the MAC-side interface mode selected by phydev->interface,
 * after verifying that the PHY advertises the corresponding ability bit.
 * Returns -EINVAL for unsupported or unknown modes.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u16 basic_config;
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Plain RGMII: no internal delays. */
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Delays come from DT or the default; reject the mode if
		 * they are out of the supported range.
		 */
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}

		basic_config = MII_BASIC_CONFIG_RMII;

		/* This is not PHY_INTERFACE_MODE_REVRMII */
		if (priv->flags & TJA11XX_REVERSE_MODE)
			basic_config |= MII_BASIC_CONFIG_REV;

		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      basic_config);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		/* Nothing to configure for the internal interface. */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1613 
/* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2
 * Applies vendor-prescribed register write sequences to affected R-type
 * samples before link up; the sequence and ordering come from the errata
 * sheet and must not be altered.
 */
static void nxp_c45_tja1120_errata(struct phy_device *phydev)
{
	bool macsec_ability, sgmii_ability;
	int silicon_version, sample_type;
	int phy_abilities;
	int ret = 0;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3);
	if (ret < 0)
		return;

	/* Only R-type samples are affected. */
	sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret);
	if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R)
		return;

	silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	sgmii_ability = !!(phy_abilities & SGMII_ABILITY);
	if ((!macsec_ability && silicon_version == 2) ||
	    (macsec_ability && silicon_version == 1)) {
		/* TJA1120/TJA1121 PHY configuration errata workaround.
		 * Apply PHY writes sequence before link up.
		 */
		if (!macsec_ability) {
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95);
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd);
		} else {
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7);
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893);
		}

		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0);

		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a);
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1);

		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0);

		if (sgmii_ability) {
			/* TJA1120B/TJA1121B SGMII PCS restart errata workaround.
			 * Put SGMII PCS into power down mode and back up.
			 */
			phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					 VEND1_SGMII_BASIC_CONTROL,
					 SGMII_LPM);
			phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					   VEND1_SGMII_BASIC_CONTROL,
					   SGMII_LPM);
		}
	}
}
1670 
/* Full device configuration: open config access, apply errata, select
 * the interface mode, enable counters and PTP, then start operation.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	/* Apply TJA1120/TJA1121-only errata workarounds. */
	if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
		nxp_c45_tja1120_errata(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* 100BASE-T1 has no autonegotiation. */
	phydev->autoneg = AUTONEG_DISABLE;

	nxp_c45_counters_enable(phydev);
	nxp_c45_ptp_init(phydev);
	ret = nxp_c45_macsec_config_init(phydev);
	if (ret)
		return ret;

	return nxp_c45_start_op(phydev);
}
1707 
1708 static int nxp_c45_get_features(struct phy_device *phydev)
1709 {
1710 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
1711 	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);
1712 
1713 	return genphy_c45_pma_read_abilities(phydev);
1714 }
1715 
1716 static int nxp_c45_parse_dt(struct phy_device *phydev)
1717 {
1718 	struct device_node *node = phydev->mdio.dev.of_node;
1719 	struct nxp_c45_phy *priv = phydev->priv;
1720 
1721 	if (!IS_ENABLED(CONFIG_OF_MDIO))
1722 		return 0;
1723 
1724 	if (of_property_read_bool(node, "nxp,rmii-refclk-out"))
1725 		priv->flags |= TJA11XX_REVERSE_MODE;
1726 
1727 	return 0;
1728 }
1729 
/* Driver probe: allocate per-PHY state, then enable PTP timestamping and
 * MACsec offload when both the hardware advertises the ability and the
 * corresponding kernel support is built in.
 */
static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	bool macsec_ability;
	int phy_abilities;
	bool ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	nxp_c45_parse_dt(phydev);

	mutex_init(&priv->ptp_lock);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	ptp_ability = !!(phy_abilities & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_dbg(phydev, "the phy does not support PTP");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp_set = nxp_c45_hwtstamp_set;
		priv->mii_ts.hwtstamp_get = nxp_c45_hwtstamp_get;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		ret = nxp_c45_init_ptp_clock(priv);

		/* Timestamp selected by default to keep legacy API */
		phydev->default_timestamp = true;
	} else {
		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
	}

no_ptp_support:
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	if (!macsec_ability) {
		phydev_info(phydev, "the phy does not support MACsec\n");
		goto no_macsec_support;
	}

	if (IS_ENABLED(CONFIG_MACSEC)) {
		ret = nxp_c45_macsec_probe(phydev);
		phydev_dbg(phydev, "MACsec support enabled.");
	} else {
		phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
	}

no_macsec_support:

	return ret;
}
1795 
/* Driver teardown: unregister the PTP clock (if one was created), drop
 * any skbs still queued for timestamping, and remove MACsec state.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
	nxp_c45_macsec_remove(phydev);
}
1807 
1808 static void tja1103_counters_enable(struct phy_device *phydev)
1809 {
1810 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
1811 			 COUNTER_EN);
1812 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
1813 			 COUNTER_EN);
1814 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
1815 			 COUNTER_EN);
1816 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
1817 			 COUNTER_EN);
1818 }
1819 
/* TJA1103 PTP init: select RX timestamp insertion mode 2 and enable the
 * PTP function block.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1827 
1828 static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
1829 {
1830 	if (enable)
1831 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1832 				   VEND1_PORT_PTP_CONTROL,
1833 				   PORT_PTP_CONTROL_BYPASS);
1834 	else
1835 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1836 				 VEND1_PORT_PTP_CONTROL,
1837 				 PORT_PTP_CONTROL_BYPASS);
1838 }
1839 
/* TJA1103 variant NMI hook: acknowledge a pending FUSA event and mark
 * the shared interrupt as handled.
 */
static void tja1103_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_ALWAYS_ACCESSIBLE);
	if (ret & FUSA_PASS) {
		/* Writing the bit back acknowledges the event. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      VEND1_ALWAYS_ACCESSIBLE,
			      FUSA_PASS);
		*irq_status = IRQ_HANDLED;
	}
}
1854 
/* TJA1103/TJA1104 register layout for the common driver code: PTP clock,
 * LTC read/write, egress timestamp fields and cable test registers.
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period	= 0x1104,
	.vend1_event_msg_filt	= 0x1148,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0	= 0x1106,
	.vend1_ltc_wr_nsec_1	= 0x1107,
	.vend1_ltc_wr_sec_0	= 0x1108,
	.vend1_ltc_wr_sec_1	= 0x1109,
	.vend1_ltc_rd_nsec_0	= 0x110A,
	.vend1_ltc_rd_nsec_1	= 0x110B,
	.vend1_ltc_rd_sec_0	= 0x110C,
	.vend1_ltc_rd_sec_1	= 0x110D,
	.vend1_rate_adj_subns_0	= 0x110F,
	.vend1_rate_adj_subns_1	= 0x1110,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1121,
	.vend1_ext_trg_data_1	= 0x1122,
	.vend1_ext_trg_data_2	= 0x1123,
	.vend1_ext_trg_data_3	= 0x1124,
	.vend1_ext_trg_ctrl	= 0x1126,
	.cable_test		= 0x8330,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1907 
/* TJA1103/TJA1104 variant descriptor: register map, statistics table and
 * variant-specific hooks used by the common driver code.
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	.ack_ptp_irq = false,
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.get_extts = nxp_c45_get_extts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
	.nmi_handler = tja1103_nmi_handler,
};
1922 
/* TJA1120 counters: enable the extended symbol error counter, reset the
 * frame monitor, then enable the frame and lost-frame counters.
 */
static void tja1120_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
			 EXTENDED_CNT_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
			 MONITOR_RESET);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
}
1932 
/* TJA1120 PTP init: configure RX and external timestamp insertion modes
 * and enable PTP in the device configuration register.
 */
static void tja1120_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
		      TJA1120_TS_INSRT_MODE);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
			 PTP_ENABLE);
}
1942 
1943 static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
1944 {
1945 	if (enable)
1946 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1947 				 VEND1_PORT_FUNC_ENABLES,
1948 				 PTP_ENABLE);
1949 	else
1950 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1951 				   VEND1_PORT_FUNC_ENABLES,
1952 				   PTP_ENABLE);
1953 }
1954 
/* TJA1120 variant NMI hook: acknowledge the "device boot done" event
 * from the global infra IRQ block and mark the interrupt as handled.
 */
static void tja1120_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   TJA1120_GLOBAL_INFRA_IRQ_STATUS);
	if (ret & TJA1120_DEV_BOOT_DONE) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_GLOBAL_INFRA_IRQ_ACK,
			      TJA1120_DEV_BOOT_DONE);
		*irq_status = IRQ_HANDLED;
	}
}
1969 
1970 static int nxp_c45_macsec_ability(struct phy_device *phydev)
1971 {
1972 	bool macsec_ability;
1973 	int phy_abilities;
1974 
1975 	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1976 				     VEND1_PORT_ABILITIES);
1977 	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
1978 
1979 	return macsec_ability;
1980 }
1981 
1982 static bool tja11xx_phy_id_compare(struct phy_device *phydev,
1983 				   const struct phy_driver *phydrv)
1984 {
1985 	u32 id = phydev->is_c45 ? phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] :
1986 				  phydev->phy_id;
1987 
1988 	return phy_id_compare(id, phydrv->phy_id, phydrv->phy_id_mask);
1989 }
1990 
/* Match only ID-compatible PHYs that lack the MACsec ability. */
static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev,
					      const struct phy_driver *phydrv)
{
	if (!tja11xx_phy_id_compare(phydev, phydrv))
		return 0;

	return !nxp_c45_macsec_ability(phydev);
}
1997 
/* Match only ID-compatible PHYs that advertise the MACsec ability. */
static int tja11xx_macsec_match_phy_device(struct phy_device *phydev,
					   const struct phy_driver *phydrv)
{
	if (!tja11xx_phy_id_compare(phydev, phydrv))
		return 0;

	return nxp_c45_macsec_ability(phydev);
}
2004 
/* TJA1120-specific register map: vendor register addresses and bit
 * fields consumed by the common nxp-c45 code. All fields live in the
 * VEND1 MMD.
 */
static const struct nxp_c45_regmap tja1120_regmap = {
	.vend1_ptp_clk_period	= 0x1020,
	.vend1_event_msg_filt	= 0x9010,
	/* PPS output and local-time-counter (LTC) control fields */
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	/* LTC write/read registers, split in 16-bit halves */
	.vend1_ltc_wr_nsec_0	= 0x1040,
	.vend1_ltc_wr_nsec_1	= 0x1041,
	.vend1_ltc_wr_sec_0	= 0x1042,
	.vend1_ltc_wr_sec_1	= 0x1043,
	.vend1_ltc_rd_nsec_0	= 0x1048,
	.vend1_ltc_rd_nsec_1	= 0x1049,
	.vend1_ltc_rd_sec_0	= 0x104A,
	.vend1_ltc_rd_sec_1	= 0x104B,
	.vend1_rate_adj_subns_0	= 0x1030,
	.vend1_rate_adj_subns_1	= 0x1031,
	/* Egress timestamp interrupt enable/status */
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	/* Fields of the captured egress timestamp record */
	.domain_number		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	/* External trigger timestamp data/control */
	.vend1_ext_trg_data_0	= 0x1071,
	.vend1_ext_trg_data_1	= 0x1072,
	.vend1_ext_trg_data_2	= 0x1073,
	.vend1_ext_trg_data_3	= 0x1074,
	.vend1_ext_trg_ctrl	= 0x1075,
	/* Cable test start register and result fields */
	.cable_test		= 0x8360,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
2057 
/* TJA1120/TJA1121 per-family data: register map, stats tables and the
 * family-specific hooks used by the shared nxp-c45 driver core.
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	/* External timestamps are captured on both signal edges */
	.ext_ts_both_edges = true,
	.ack_ptp_irq = true,
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.get_extts = tja1120_get_extts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
	.nmi_handler = tja1120_nmi_handler,
};
2072 
/* PHY driver entries. TJA1103/TJA1104 share one PHY ID, as do
 * TJA1120/TJA1121; the pairs are told apart at match time by the
 * MACsec ability bit via the custom .match_phy_device callbacks.
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		/* TJA1103: same ID as TJA1104, but no MACsec */
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja11xx_no_macsec_match_phy_device,
	},
	{
		/* TJA1104: MACsec-capable sibling of the TJA1103 */
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1104",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja11xx_macsec_match_phy_device,
	},
	{
		/* TJA1120: same ID as TJA1121, but no MACsec */
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
		.name			= "NXP C45 TJA1120",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja11xx_no_macsec_match_phy_device,
	},
	{
		/* TJA1121: MACsec-capable sibling of the TJA1120 */
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
		.name			= "NXP C45 TJA1121",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja11xx_macsec_match_phy_device,
	},
};
2177 
module_phy_driver(nxp_c45_driver);

/* MDIO bus device table for module autoloading: one entry per shared
 * PHY ID (each ID covers two device variants, see nxp_c45_driver[]).
 */
static const struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");
2191