xref: /linux/drivers/net/phy/nxp-c45-tja11xx.c (revision fe259a1bb26ec78842c975d992331705b0c2c2e8)
1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3  * Copyright 2021-2025 NXP
4  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/of.h>
14 #include <linux/phy.h>
15 #include <linux/processor.h>
16 #include <linux/property.h>
17 #include <linux/ptp_classify.h>
18 #include <linux/net_tstamp.h>
19 
20 #include "nxp-c45-tja11xx.h"
21 
22 #define PHY_ID_MASK			GENMASK(31, 4)
23 /* Same id: TJA1103, TJA1104 */
24 #define PHY_ID_TJA_1103			0x001BB010
25 /* Same id: TJA1120, TJA1121 */
26 #define PHY_ID_TJA_1120			0x001BB031
27 
28 #define VEND1_DEVICE_ID3		0x0004
29 #define TJA1120_DEV_ID3_SILICON_VERSION	GENMASK(15, 12)
30 #define TJA1120_DEV_ID3_SAMPLE_TYPE	GENMASK(11, 8)
31 #define DEVICE_ID3_SAMPLE_TYPE_R	0x9
32 
33 #define VEND1_DEVICE_CONTROL		0x0040
34 #define DEVICE_CONTROL_RESET		BIT(15)
35 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
36 #define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)
37 
38 #define VEND1_DEVICE_CONFIG		0x0048
39 
40 #define TJA1120_VEND1_EXT_TS_MODE	0x1012
41 
42 #define TJA1120_GLOBAL_INFRA_IRQ_ACK	0x2C08
43 #define TJA1120_GLOBAL_INFRA_IRQ_EN	0x2C0A
44 #define TJA1120_GLOBAL_INFRA_IRQ_STATUS	0x2C0C
45 #define TJA1120_DEV_BOOT_DONE		BIT(1)
46 
47 #define TJA1120_VEND1_PTP_TRIG_DATA_S	0x1070
48 
49 #define TJA1120_EGRESS_TS_DATA_S	0x9060
50 #define TJA1120_EGRESS_TS_END		0x9067
51 #define TJA1120_TS_VALID		BIT(0)
52 #define TJA1120_MORE_TS			BIT(15)
53 
54 #define VEND1_PHY_IRQ_ACK		0x80A0
55 #define VEND1_PHY_IRQ_EN		0x80A1
56 #define VEND1_PHY_IRQ_STATUS		0x80A2
57 #define PHY_IRQ_LINK_EVENT		BIT(1)
58 
59 #define VEND1_ALWAYS_ACCESSIBLE		0x801F
60 #define FUSA_PASS			BIT(4)
61 
62 #define VEND1_PHY_CONTROL		0x8100
63 #define PHY_CONFIG_EN			BIT(14)
64 #define PHY_START_OP			BIT(0)
65 
66 #define VEND1_PHY_CONFIG		0x8108
67 #define PHY_CONFIG_AUTO			BIT(0)
68 
69 #define TJA1120_EPHY_RESETS		0x810A
70 #define EPHY_PCS_RESET			BIT(3)
71 
72 #define VEND1_SIGNAL_QUALITY		0x8320
73 #define SQI_VALID			BIT(14)
74 #define SQI_MASK			GENMASK(2, 0)
75 #define MAX_SQI				SQI_MASK
76 
77 #define CABLE_TEST_ENABLE		BIT(15)
78 #define CABLE_TEST_START		BIT(14)
79 #define CABLE_TEST_OK			0x00
80 #define CABLE_TEST_SHORTED		0x01
81 #define CABLE_TEST_OPEN			0x02
82 #define CABLE_TEST_UNKNOWN		0x07
83 
84 #define VEND1_PORT_CONTROL		0x8040
85 #define PORT_CONTROL_EN			BIT(14)
86 
87 #define VEND1_PORT_ABILITIES		0x8046
88 #define MACSEC_ABILITY			BIT(5)
89 #define PTP_ABILITY			BIT(3)
90 
91 #define VEND1_PORT_FUNC_IRQ_EN		0x807A
92 #define MACSEC_IRQS			BIT(5)
93 #define PTP_IRQS			BIT(3)
94 
95 #define VEND1_PTP_IRQ_ACK		0x9008
96 #define EGR_TS_IRQ			BIT(1)
97 
98 #define VEND1_PORT_INFRA_CONTROL	0xAC00
99 #define PORT_INFRA_CONTROL_EN		BIT(14)
100 
101 #define VEND1_RXID			0xAFCC
102 #define VEND1_TXID			0xAFCD
103 #define ID_ENABLE			BIT(15)
104 
105 #define VEND1_ABILITIES			0xAFC4
106 #define RGMII_ID_ABILITY		BIT(15)
107 #define RGMII_ABILITY			BIT(14)
108 #define RMII_ABILITY			BIT(10)
109 #define REVMII_ABILITY			BIT(9)
110 #define MII_ABILITY			BIT(8)
111 #define SGMII_ABILITY			BIT(0)
112 
113 #define VEND1_MII_BASIC_CONFIG		0xAFC6
114 #define MII_BASIC_CONFIG_REV		BIT(4)
115 #define MII_BASIC_CONFIG_SGMII		0x9
116 #define MII_BASIC_CONFIG_RGMII		0x7
117 #define MII_BASIC_CONFIG_RMII		0x5
118 #define MII_BASIC_CONFIG_MII		0x4
119 
120 #define VEND1_SGMII_BASIC_CONTROL	0xB000
121 #define SGMII_LPM			BIT(11)
122 
123 #define VEND1_SYMBOL_ERROR_CNT_XTD	0x8351
124 #define EXTENDED_CNT_EN			BIT(15)
125 #define VEND1_MONITOR_STATUS		0xAC80
126 #define MONITOR_RESET			BIT(15)
127 #define VEND1_MONITOR_CONFIG		0xAC86
128 #define LOST_FRAMES_CNT_EN		BIT(9)
129 #define ALL_FRAMES_CNT_EN		BIT(8)
130 
131 #define VEND1_SYMBOL_ERROR_COUNTER	0x8350
132 #define VEND1_LINK_DROP_COUNTER		0x8352
133 #define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
134 #define VEND1_RX_PREAMBLE_COUNT		0xAFCE
135 #define VEND1_TX_PREAMBLE_COUNT		0xAFCF
136 #define VEND1_RX_IPG_LENGTH		0xAFD0
137 #define VEND1_TX_IPG_LENGTH		0xAFD1
138 #define COUNTER_EN			BIT(15)
139 
140 #define VEND1_PTP_CONFIG		0x1102
141 #define EXT_TRG_EDGE			BIT(1)
142 
143 #define TJA1120_SYNC_TRIG_FILTER	0x1010
144 #define PTP_TRIG_RISE_TS		BIT(3)
145 #define PTP_TRIG_FALLING_TS		BIT(2)
146 
147 #define CLK_RATE_ADJ_LD			BIT(15)
148 #define CLK_RATE_ADJ_DIR		BIT(14)
149 
150 #define VEND1_RX_TS_INSRT_CTRL		0x114D
151 #define TJA1103_RX_TS_INSRT_MODE2	0x02
152 
153 #define TJA1120_RX_TS_INSRT_CTRL	0x9012
154 #define TJA1120_RX_TS_INSRT_EN		BIT(15)
155 #define TJA1120_TS_INSRT_MODE		BIT(4)
156 
157 #define VEND1_EGR_RING_DATA_0		0x114E
158 #define VEND1_EGR_RING_CTRL		0x1154
159 
160 #define RING_DATA_0_TS_VALID		BIT(15)
161 
162 #define RING_DONE			BIT(0)
163 
164 #define TS_SEC_MASK			GENMASK(1, 0)
165 
166 #define PTP_ENABLE			BIT(3)
167 #define PHY_TEST_ENABLE			BIT(0)
168 
169 #define VEND1_PORT_PTP_CONTROL		0x9000
170 #define PORT_PTP_CONTROL_BYPASS		BIT(11)
171 
172 #define PTP_CLK_PERIOD_100BT1		15ULL
173 #define PTP_CLK_PERIOD_1000BT1		8ULL
174 
175 #define EVENT_MSG_FILT_ALL		0x0F
176 #define EVENT_MSG_FILT_NONE		0x00
177 
178 #define VEND1_GPIO_FUNC_CONFIG_BASE	0x2C40
179 #define GPIO_FUNC_EN			BIT(15)
180 #define GPIO_FUNC_PTP			BIT(6)
181 #define GPIO_SIGNAL_PTP_TRIGGER		0x01
182 #define GPIO_SIGNAL_PPS_OUT		0x12
183 #define GPIO_DISABLE			0
184 #define GPIO_PPS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
185 	GPIO_SIGNAL_PPS_OUT)
186 #define GPIO_EXTTS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
187 	GPIO_SIGNAL_PTP_TRIGGER)
188 
189 #define RGMII_PERIOD_PS			8000U
190 #define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
191 #define MIN_ID_PS			1644U
192 #define MAX_ID_PS			2260U
193 #define DEFAULT_ID_PS			2000U
194 
195 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
196 	(ppb) * (ptp_clk_period), NSEC_PER_SEC)
197 
198 #define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)
199 
200 #define TJA11XX_REVERSE_MODE		BIT(0)
201 
202 struct nxp_c45_phy;
203 
/* Per-skb driver state, stored in skb->cb while a PTP packet sits in the
 * driver's RX/TX queues waiting for its hardware timestamp.
 */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* parsed PTP header inside the skb */
	unsigned int type;		/* packet type value passed to the
					 * timestamping callbacks
					 */
};
208 
209 #define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size)	\
210 	((struct nxp_c45_reg_field) {			\
211 		.reg = _reg,				\
212 		.devad =  _devad,			\
213 		.offset = _offset,			\
214 		.size = _size,				\
215 	})
216 
/* Description of a bit field inside an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* bit position of the field's LSB */
	u8 size;	/* field width in bits */
};
223 
/* A hardware timestamp together with the PTP header fields the hardware
 * captured, used to match the timestamp back to a queued skb.
 */
struct nxp_c45_hwts {
	u32	nsec;		/* nanoseconds part */
	u32	sec;		/* low bits of the seconds counter only */
	u8	domain_number;
	u16	sequence_id;
	u8	msg_type;
};
231 
/* Chip-specific register layout: addresses and bit fields that differ
 * between the supported PHY families (TJA1103 vs TJA1120).
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;
	struct nxp_c45_reg_field ltc_write;
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;
	struct nxp_c45_reg_field sec_4_2;
	struct nxp_c45_reg_field nsec_15_0;
	struct nxp_c45_reg_field nsec_29_16;

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
277 
/* One ethtool statistic: its display name and the register field that
 * holds the counter value.
 */
struct nxp_c45_phy_stats {
	const char	*name;
	const struct nxp_c45_reg_field counter;
};
282 
/* Per-chip configuration and operations, selected via the driver_data of
 * the matching phy_driver entry.
 */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;	/* chip register layout */
	const struct nxp_c45_phy_stats *stats;	/* ethtool counters */
	int n_stats;
	u8 ptp_clk_period;			/* PTP clock period, ns */
	bool ext_ts_both_edges;	/* HW can timestamp both trigger edges */
	bool ack_ptp_irq;
	void (*counters_enable)(struct phy_device *phydev);
	/* Fetch one egress (TX) timestamp; returns false when none valid. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	/* Fetch one external trigger timestamp; returns false when none. */
	bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
	void (*nmi_handler)(struct phy_device *phydev,
			    irqreturn_t *irq_status);
};
299 
300 static const
301 struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
302 {
303 	return phydev->drv->driver_data;
304 }
305 
306 static const
307 struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
308 {
309 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
310 
311 	return phy_data->regmap;
312 }
313 
314 static int nxp_c45_read_reg_field(struct phy_device *phydev,
315 				  const struct nxp_c45_reg_field *reg_field)
316 {
317 	u16 mask;
318 	int ret;
319 
320 	if (reg_field->size == 0) {
321 		phydev_err(phydev, "Trying to read a reg field of size 0.\n");
322 		return -EINVAL;
323 	}
324 
325 	ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
326 	if (ret < 0)
327 		return ret;
328 
329 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
330 		GENMASK(reg_field->offset + reg_field->size - 1,
331 			reg_field->offset);
332 	ret &= mask;
333 	ret >>= reg_field->offset;
334 
335 	return ret;
336 }
337 
338 static int nxp_c45_write_reg_field(struct phy_device *phydev,
339 				   const struct nxp_c45_reg_field *reg_field,
340 				   u16 val)
341 {
342 	u16 mask;
343 	u16 set;
344 
345 	if (reg_field->size == 0) {
346 		phydev_err(phydev, "Trying to write a reg field of size 0.\n");
347 		return -EINVAL;
348 	}
349 
350 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
351 		GENMASK(reg_field->offset + reg_field->size - 1,
352 			reg_field->offset);
353 	set = val << reg_field->offset;
354 
355 	return phy_modify_mmd_changed(phydev, reg_field->devad,
356 				      reg_field->reg, mask, set);
357 }
358 
359 static int nxp_c45_set_reg_field(struct phy_device *phydev,
360 				 const struct nxp_c45_reg_field *reg_field)
361 {
362 	if (reg_field->size != 1) {
363 		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
364 		return -EINVAL;
365 	}
366 
367 	return nxp_c45_write_reg_field(phydev, reg_field, 1);
368 }
369 
370 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
371 				   const struct nxp_c45_reg_field *reg_field)
372 {
373 	if (reg_field->size != 1) {
374 		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
375 		return -EINVAL;
376 	}
377 
378 	return nxp_c45_write_reg_field(phydev, reg_field, 0);
379 }
380 
381 static bool nxp_c45_poll_txts(struct phy_device *phydev)
382 {
383 	return phydev->irq <= 0;
384 }
385 
/* Read the PHY's local time counter (LTC) into @ts.
 * Caller must hold priv->ptp_lock.
 *
 * Writing the ltc_read field latches the counter; the four 16-bit halves
 * are then read back over MDIO and reassembled.
 * NOTE(review): @sts is accepted but never used here, so no PHC/system
 * timestamp correlation is captured — confirm whether that is intended.
 */
static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	/* Latch the counter before reading its halves. */
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_nsec_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    regmap->vend1_ltc_rd_nsec_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  regmap->vend1_ltc_rd_sec_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_sec_1) << 16;

	return 0;
}
405 
406 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
407 				  struct timespec64 *ts,
408 				  struct ptp_system_timestamp *sts)
409 {
410 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
411 
412 	mutex_lock(&priv->ptp_lock);
413 	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
414 	mutex_unlock(&priv->ptp_lock);
415 
416 	return 0;
417 }
418 
/* Program the local time counter with @ts.
 * Caller must hold priv->ptp_lock.
 *
 * The four 16-bit halves are staged in the write registers first; writing
 * the ltc_write field then commits them to the counter.
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
		      ts->tv_sec >> 16);
	/* Commit the staged values. */
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);

	return 0;
}
437 
438 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
439 				 const struct timespec64 *ts)
440 {
441 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
442 
443 	mutex_lock(&priv->ptp_lock);
444 	_nxp_c45_ptp_settime64(ptp, ts);
445 	mutex_unlock(&priv->ptp_lock);
446 
447 	return 0;
448 }
449 
/* Adjust the LTC rate by @scaled_ppm (ppm with a 16-bit fractional part).
 *
 * The requested ppb is converted into a sub-nanosecond increment per PTP
 * clock period (PPM_TO_SUBNS_INC). The low 16 bits go to subns_0; the high
 * bits, tagged with the load bit and the direction bit (set = speed up),
 * go to subns_1, which applies the adjustment.
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	/* Writing the high half with CLK_RATE_ADJ_LD loads the new rate. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
480 
481 static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
482 {
483 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
484 	struct timespec64 now, then;
485 
486 	mutex_lock(&priv->ptp_lock);
487 	then = ns_to_timespec64(delta);
488 	_nxp_c45_ptp_gettimex64(ptp, &now, NULL);
489 	now = timespec64_add(now, then);
490 	_nxp_c45_ptp_settime64(ptp, &now);
491 	mutex_unlock(&priv->ptp_lock);
492 
493 	return 0;
494 }
495 
496 static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
497 				   struct nxp_c45_hwts *hwts)
498 {
499 	ts->tv_nsec = hwts->nsec;
500 	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
501 		ts->tv_sec -= TS_SEC_MASK + 1;
502 	ts->tv_sec &= ~TS_SEC_MASK;
503 	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
504 }
505 
506 static bool nxp_c45_match_ts(struct ptp_header *header,
507 			     struct nxp_c45_hwts *hwts,
508 			     unsigned int type)
509 {
510 	return ntohs(header->sequence_id) == hwts->sequence_id &&
511 	       ptp_get_msgtype(header, type) == hwts->msg_type &&
512 	       header->domain_number  == hwts->domain_number;
513 }
514 
/* Read the latched external trigger timestamp into @extts, then write
 * RING_DONE to release the hardware buffer for the next event.
 * Always reports success; on TJA1120 the wrapper (tja1120_get_extts())
 * checks the valid bit before calling here.
 */
static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       regmap->vend1_ext_trg_data_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_3) << 16;
	/* Release the buffer only after all four halves were read. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_ext_trg_ctrl, RING_DONE);

	return true;
}
533 
534 static bool tja1120_extts_is_valid(struct phy_device *phydev)
535 {
536 	bool valid;
537 	int reg;
538 
539 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
540 			   TJA1120_VEND1_PTP_TRIG_DATA_S);
541 	valid = !!(reg & TJA1120_TS_VALID);
542 
543 	return valid;
544 }
545 
/* TJA1120 variant of external trigger timestamp retrieval.
 *
 * The data register carries a valid bit and the control register flags
 * (TJA1120_MORE_TS) whether further timestamps wait in the FIFO.
 * Returns true when a valid timestamp was copied into @extts.
 */
static bool tja1120_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   regmap->vend1_ext_trg_ctrl);
	more_ts = !!(reg & TJA1120_MORE_TS);

	valid = tja1120_extts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_extts_out;

		/* Bug workaround for TJA1120 engineering samples: move the new
		 * timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      regmap->vend1_ext_trg_ctrl, RING_DONE);
		valid = tja1120_extts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_extts_out;
	}

	/* Common read path also acks the buffer with RING_DONE. */
	nxp_c45_get_extts(priv, extts);
tja1120_get_extts_out:
	return valid;
}
578 
/* Collect the egress timestamp and its matching PTP header fields from
 * the chip-specific register fields into @hwts.
 *
 * Hardware stores 30 bits of nanoseconds and only a few low bits of the
 * seconds counter; nxp_c45_reconstruct_ts() later rebuilds the full
 * seconds value from a fresh LTC reading.
 */
static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
				   struct nxp_c45_hwts *hwts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;

	hwts->domain_number =
		nxp_c45_read_reg_field(phydev, &regmap->domain_number);
	hwts->msg_type =
		nxp_c45_read_reg_field(phydev, &regmap->msg_type);
	hwts->sequence_id =
		nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
	hwts->nsec =
		nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
	hwts->nsec |=
		nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
	hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
	hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;
}
598 
/* Fetch one egress timestamp from the TJA1103-style egress ring.
 *
 * Writing RING_DONE advances the ring; RING_DATA_0_TS_VALID in data word
 * 0 then says whether a fresh timestamp is present. Runs under ptp_lock
 * because the ring registers are shared with other PTP operations.
 * Returns true and fills @hwts when a timestamp was available.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	nxp_c45_read_egress_ts(priv, hwts);
nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
618 
619 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
620 {
621 	bool valid;
622 	u16 reg;
623 
624 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
625 	valid = !!(reg & TJA1120_TS_VALID);
626 
627 	return valid;
628 }
629 
/* TJA1120 variant of egress timestamp retrieval.
 *
 * TJA1120_MORE_TS in the end register flags further timestamps in the
 * FIFO; the valid bit lives in the data register and is cleared after a
 * successful read to release the buffer. Runs under ptp_lock.
 * Returns true and fills @hwts when a valid timestamp was read.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	/* Ack the timestamp so the hardware can latch the next one. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
662 
/* Match the egress timestamp @txts to one of the queued TX skbs (by
 * sequence id, message type and domain) and deliver it.
 *
 * The queue walk runs under the queue lock with IRQs disabled; the
 * matching skb is unlinked inside the lock and completed outside it.
 * A timestamp that matches no queued skb is only logged.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* The hardware keeps only partial seconds; rebuild the full
		 * timestamp from a fresh LTC reading.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
697 
/* PTP aux worker. Three phases:
 *  1. In polling mode, drain egress (TX) timestamps while queued TX skbs
 *     remain, delivering each via nxp_c45_process_txts().
 *  2. Timestamp every queued RX skb and push it up the stack.
 *  3. While external timestamping is enabled, poll for a new trigger
 *     event and report it to the PTP core.
 * A positive return value re-arms the worker; a negative one leaves it
 * idle until it is rescheduled explicitly.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool ts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		ts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!ts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		/* The raw hardware timestamp sits in the PTP header's
		 * reserved2 word (presumably inserted by the PHY's RX
		 * timestamp insertion): 2 bits of seconds in the top,
		 * 30 bits of nanoseconds below.
		 */
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		/* Restore the header before handing the skb to the stack. */
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		ts_valid = data->get_extts(priv, &ts);
		/* Only report a timestamp that differs from the last one. */
		if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
749 
750 static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
751 				int pin, u16 pin_cfg)
752 {
753 	struct phy_device *phydev = priv->phydev;
754 
755 	phy_write_mmd(phydev, MDIO_MMD_VEND1,
756 		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
757 }
758 
/* Enable or disable the 1PPS output on the GPIO pin bound to @perout.
 *
 * Only a fixed 1 s period is supported. An optional PTP_PEROUT_PHASE of
 * half a second is implemented by inverting the PPS polarity.
 * Returns 0 on success or a negative error.
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	int pin;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_enable);
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_polarity);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		/* A half-second phase is realized by inverting polarity. */
		if (perout->phase.nsec == 0)
			nxp_c45_clear_reg_field(priv->phydev,
						&regmap->pps_polarity);
		else
			nxp_c45_set_reg_field(priv->phydev,
					      &regmap->pps_polarity);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);

	return 0;
}
816 
817 static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
818 					  struct ptp_extts_request *extts)
819 {
820 	if (extts->flags & PTP_RISING_EDGE)
821 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
822 				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);
823 
824 	if (extts->flags & PTP_FALLING_EDGE)
825 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
826 				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
827 }
828 
829 static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
830 					   struct ptp_extts_request *extts)
831 {
832 	/* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
833 	 * this case external ts will be enabled on rising edge.
834 	 */
835 	if (extts->flags & PTP_RISING_EDGE ||
836 	    extts->flags == PTP_ENABLE_FEATURE)
837 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
838 				 TJA1120_SYNC_TRIG_FILTER,
839 				 PTP_TRIG_RISE_TS);
840 	else
841 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
842 				   TJA1120_SYNC_TRIG_FILTER,
843 				   PTP_TRIG_RISE_TS);
844 
845 	if (extts->flags & PTP_FALLING_EDGE)
846 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
847 				 TJA1120_SYNC_TRIG_FILTER,
848 				 PTP_TRIG_FALLING_TS);
849 	else
850 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
851 				   TJA1120_SYNC_TRIG_FILTER,
852 				   PTP_TRIG_FALLING_TS);
853 }
854 
/* Enable or disable external timestamping on the GPIO pin bound to
 * @extts.
 *
 * Sampling both edges is only possible where the chip data advertises
 * ext_ts_both_edges (TJA1120). There is no interrupt for trigger events
 * here, so the PTP worker is scheduled to poll for them.
 * Returns 0 on success or a negative error.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	int pin;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE) &&
	    !data->ext_ts_both_edges)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	if (data->ext_ts_both_edges)
		nxp_c45_set_rising_and_falling(priv->phydev, extts);
	else
		nxp_c45_set_rising_or_falling(priv->phydev, extts);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Kick the worker so it starts polling for trigger events. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
890 
891 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
892 			      struct ptp_clock_request *req, int on)
893 {
894 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
895 
896 	switch (req->type) {
897 	case PTP_CLK_REQ_EXTTS:
898 		return nxp_c45_extts_enable(priv, &req->extts, on);
899 	case PTP_CLK_REQ_PEROUT:
900 		return nxp_c45_perout_enable(priv, &req->perout, on);
901 	default:
902 		return -EOPNOTSUPP;
903 	}
904 }
905 
/* The 12 PHY GPIOs exposed as PTP pins. All default to no function;
 * PEROUT/EXTTS assignment is validated by nxp_c45_ptp_verify_pin().
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
920 
921 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
922 				  enum ptp_pin_function func, unsigned int chan)
923 {
924 	if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
925 		return -EINVAL;
926 
927 	switch (func) {
928 	case PTP_PF_NONE:
929 	case PTP_PF_PEROUT:
930 	case PTP_PF_EXTTS:
931 		break;
932 	default:
933 		return -EOPNOTSUPP;
934 	}
935 
936 	return 0;
937 }
938 
/* Populate the PHC capabilities and register the clock with the PTP core.
 * Returns 0 on success or a negative error.
 * NOTE(review): a NULL return from ptp_clock_register() (presumably PTP
 * support compiled out) is mapped to -ENOMEM — confirm that is the
 * intended policy for this driver.
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
		.supported_extts_flags = PTP_RISING_EDGE |
					 PTP_FALLING_EDGE |
					 PTP_STRICT_FLAGS,
		.supported_perout_flags = PTP_PEROUT_PHASE,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
973 
/* mii_ts .txtstamp callback.
 *
 * When TX timestamping is on, parse the PTP header, mark the skb as
 * in-progress and park it on tx_queue until nxp_c45_process_txts()
 * matches its hardware timestamp; in polling mode the PTP worker is
 * kicked to go fetch it. Otherwise the skb is dropped (the driver owns
 * the clone here).
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
995 
996 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
997 			     struct sk_buff *skb, int type)
998 {
999 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1000 						mii_ts);
1001 	struct ptp_header *header = ptp_parse_header(skb, type);
1002 
1003 	if (!header)
1004 		return false;
1005 
1006 	if (!priv->hwts_rx)
1007 		return false;
1008 
1009 	NXP_C45_SKB_CB(skb)->header = header;
1010 	skb_queue_tail(&priv->rx_queue, skb);
1011 	ptp_schedule_worker(priv->ptp_clock, 0);
1012 
1013 	return true;
1014 }
1015 
/* mii_ts .hwtstamp callback: apply a new hardware timestamping
 * configuration.
 *
 * Only OFF/ON TX modes and L2 PTPv2 RX filters are supported. The event
 * message filter and the chip's PTP engine are switched together with
 * the requested state; the egress-timestamp interrupt is only touched
 * when an IRQ line is actually in use.
 * Returns 0 on success or a negative error.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	const struct nxp_c45_phy_data *data;

	if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	data = nxp_c45_get_data(phydev);
	priv->hwts_tx = cfg->tx_type;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/* All three map to timestamping every L2 PTPv2 event. */
		priv->hwts_rx = 1;
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_ALL);
		data->ptp_enable(phydev, true);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_NONE);
		data->ptp_enable(phydev, false);
	}

	/* Without an IRQ the worker polls for egress timestamps instead. */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
	else
		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);

nxp_c45_no_ptp_irq:
	return 0;
}
1068 
1069 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
1070 			   struct kernel_ethtool_ts_info *ts_info)
1071 {
1072 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1073 						mii_ts);
1074 
1075 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1076 			SOF_TIMESTAMPING_RX_HARDWARE |
1077 			SOF_TIMESTAMPING_RAW_HARDWARE;
1078 	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1079 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1080 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1081 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1082 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1083 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
1084 
1085 	return 0;
1086 }
1087 
/* Hardware counters present on every supported PHY (ethtool -S). */
static const struct nxp_c45_phy_stats common_hw_stats[] = {
	{ "phy_link_status_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
	{ "phy_link_availability_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
	{ "phy_link_loss_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
	{ "phy_link_failure_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
	{ "phy_symbol_error_cnt",
		NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
};
1100 
/* TJA1103/TJA1104-specific hardware counters (ethtool -S). */
static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
	{ "rx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
	{ "tx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
	{ "rx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
	{ "tx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
};
1111 
/* TJA1120/TJA1121-specific hardware counters; the _xtd entries extend
 * the 16-bit counters with 8 more significant bits.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
		NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
		NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
		NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
		NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
		NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1132 
1133 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1134 {
1135 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1136 
1137 	return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1138 }
1139 
1140 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
1141 {
1142 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1143 	size_t count = nxp_c45_get_sset_count(phydev);
1144 	size_t idx;
1145 	size_t i;
1146 
1147 	for (i = 0; i < count; i++) {
1148 		if (i < ARRAY_SIZE(common_hw_stats)) {
1149 			ethtool_puts(&data, common_hw_stats[i].name);
1150 			continue;
1151 		}
1152 		idx = i - ARRAY_SIZE(common_hw_stats);
1153 		ethtool_puts(&data, phy_data->stats[idx].name);
1154 	}
1155 }
1156 
1157 static void nxp_c45_get_stats(struct phy_device *phydev,
1158 			      struct ethtool_stats *stats, u64 *data)
1159 {
1160 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1161 	size_t count = nxp_c45_get_sset_count(phydev);
1162 	const struct nxp_c45_reg_field *reg_field;
1163 	size_t idx;
1164 	size_t i;
1165 	int ret;
1166 
1167 	for (i = 0; i < count; i++) {
1168 		if (i < ARRAY_SIZE(common_hw_stats)) {
1169 			reg_field = &common_hw_stats[i].counter;
1170 		} else {
1171 			idx = i - ARRAY_SIZE(common_hw_stats);
1172 			reg_field = &phy_data->stats[idx].counter;
1173 		}
1174 
1175 		ret = nxp_c45_read_reg_field(phydev, reg_field);
1176 		if (ret < 0)
1177 			data[i] = U64_MAX;
1178 		else
1179 			data[i] = ret;
1180 	}
1181 }
1182 
1183 static int nxp_c45_config_enable(struct phy_device *phydev)
1184 {
1185 	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
1186 		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
1187 		      DEVICE_CONTROL_CONFIG_ALL_EN);
1188 	usleep_range(400, 450);
1189 
1190 	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
1191 		      PORT_CONTROL_EN);
1192 	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
1193 		      PHY_CONFIG_EN);
1194 	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
1195 		      PORT_INFRA_CONTROL_EN);
1196 
1197 	return 0;
1198 }
1199 
/* Leave configuration mode and start normal PHY operation. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1205 
1206 static int nxp_c45_config_intr(struct phy_device *phydev)
1207 {
1208 	int ret;
1209 
1210 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1211 		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1212 				       VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1213 		if (ret)
1214 			return ret;
1215 
1216 		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1217 					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1218 	}
1219 
1220 	ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1221 				 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1222 	if (ret)
1223 		return ret;
1224 
1225 	return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1226 				  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1227 }
1228 
1229 static int tja1103_config_intr(struct phy_device *phydev)
1230 {
1231 	int ret;
1232 
1233 	/* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
1234 	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
1235 			    FUSA_PASS);
1236 	if (ret)
1237 		return ret;
1238 
1239 	return nxp_c45_config_intr(phydev);
1240 }
1241 
1242 static int tja1120_config_intr(struct phy_device *phydev)
1243 {
1244 	int ret;
1245 
1246 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1247 		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1248 				       TJA1120_GLOBAL_INFRA_IRQ_EN,
1249 				       TJA1120_DEV_BOOT_DONE);
1250 	else
1251 		ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1252 					 TJA1120_GLOBAL_INFRA_IRQ_EN,
1253 					 TJA1120_DEV_BOOT_DONE);
1254 	if (ret)
1255 		return ret;
1256 
1257 	return nxp_c45_config_intr(phydev);
1258 }
1259 
/* Threaded IRQ handler: dispatches link events, drains the egress
 * timestamp FIFO, then runs the device-specific NMI handler and the
 * MACsec interrupt handler.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	/* NOTE(review): a failed MDIO read returns a negative errno whose
	 * bit pattern could spuriously match PHY_IRQ_LINK_EVENT — confirm
	 * whether read errors need explicit handling here.
	 */
	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		/* Ack the event, then let phylib re-evaluate the link. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp.
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		/* Drain every timestamp currently queued in the FIFO. */
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	data->nmi_handler(phydev, &ret);
	nxp_c45_handle_macsec_interrupt(phydev, &ret);

	return ret;
}
1296 
/* Trigger a device soft reset and poll until the self-clearing RESET
 * bit drops (polled every 20 ms, up to 240 ms total).
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	/* Give the reset time to take effect before the first poll. */
	usleep_range(2000, 2050);

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1313 
1314 static int nxp_c45_cable_test_start(struct phy_device *phydev)
1315 {
1316 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1317 
1318 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1319 			 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1320 	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1321 				CABLE_TEST_ENABLE | CABLE_TEST_START);
1322 }
1323 
/* Poll the cable-test result: report not-finished while the valid bit
 * is clear, otherwise translate the result code for ethtool, leave test
 * mode and restart normal operation.
 */
static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
	int ret;
	u8 cable_test_result;

	ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
	if (!ret) {
		*finished = false;
		return 0;
	}

	*finished = true;
	cable_test_result = nxp_c45_read_reg_field(phydev,
						   &regmap->cable_test_result);

	switch (cable_test_result) {
	case CABLE_TEST_OK:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OK);
		break;
	case CABLE_TEST_SHORTED:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
		break;
	case CABLE_TEST_OPEN:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
		break;
	default:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
	}

	/* Tear down in reverse order of nxp_c45_cable_test_start(). */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
			   CABLE_TEST_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);

	return nxp_c45_start_op(phydev);
}
1366 
1367 static int nxp_c45_get_sqi(struct phy_device *phydev)
1368 {
1369 	int reg;
1370 
1371 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1372 	if (!(reg & SQI_VALID))
1373 		return -EINVAL;
1374 
1375 	reg &= SQI_MASK;
1376 
1377 	return reg;
1378 }
1379 
static void tja1120_link_change_notify(struct phy_device *phydev)
{
	/* Bug workaround for TJA1120 engineering samples: fix egress
	 * timestamps lost after link recovery.
	 */
	if (phydev->state == PHY_NOLINK) {
		/* Pulse the PCS reset: assert, then immediately deassert. */
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
	}
}
1392 
/* ethtool: largest value nxp_c45_get_sqi() can report. */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1397 
1398 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1399 {
1400 	if (delay < MIN_ID_PS) {
1401 		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1402 		return -EINVAL;
1403 	}
1404 
1405 	if (delay > MAX_ID_PS) {
1406 		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1407 		return -EINVAL;
1408 	}
1409 
1410 	return 0;
1411 }
1412 
/* Enable the link-drop counter common to all supported PHYs, then the
 * device-specific counters.
 */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
1422 
/* Program the PTP clock period, clear the LTC lock-control field and
 * run the device-specific PTP initialization.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1434 
1435 static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1436 {
1437 	/* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1438 	 * To avoid floating point operations we'll multiply by 10
1439 	 * and get 1 decimal point precision.
1440 	 */
1441 	phase_offset_raw *= 10;
1442 	phase_offset_raw -= 738;
1443 	return div_u64(phase_offset_raw, 9);
1444 }
1445 
/* Turn off both the TX and the RX RGMII internal delay lines. */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
1451 
1452 static void nxp_c45_set_delays(struct phy_device *phydev)
1453 {
1454 	struct nxp_c45_phy *priv = phydev->priv;
1455 	u64 tx_delay = priv->tx_delay;
1456 	u64 rx_delay = priv->rx_delay;
1457 	u64 degree;
1458 
1459 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1460 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1461 		degree = div_u64(tx_delay, PS_PER_DEGREE);
1462 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1463 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1464 	} else {
1465 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1466 				   ID_ENABLE);
1467 	}
1468 
1469 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1470 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1471 		degree = div_u64(rx_delay, PS_PER_DEGREE);
1472 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1473 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1474 	} else {
1475 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1476 				   ID_ENABLE);
1477 	}
1478 }
1479 
1480 static int nxp_c45_get_delays(struct phy_device *phydev)
1481 {
1482 	struct nxp_c45_phy *priv = phydev->priv;
1483 	int ret;
1484 
1485 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1486 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1487 		ret = device_property_read_u32(&phydev->mdio.dev,
1488 					       "tx-internal-delay-ps",
1489 					       &priv->tx_delay);
1490 		if (ret)
1491 			priv->tx_delay = DEFAULT_ID_PS;
1492 
1493 		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1494 		if (ret) {
1495 			phydev_err(phydev,
1496 				   "tx-internal-delay-ps invalid value\n");
1497 			return ret;
1498 		}
1499 	}
1500 
1501 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1502 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1503 		ret = device_property_read_u32(&phydev->mdio.dev,
1504 					       "rx-internal-delay-ps",
1505 					       &priv->rx_delay);
1506 		if (ret)
1507 			priv->rx_delay = DEFAULT_ID_PS;
1508 
1509 		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1510 		if (ret) {
1511 			phydev_err(phydev,
1512 				   "rx-internal-delay-ps invalid value\n");
1513 			return ret;
1514 		}
1515 	}
1516 
1517 	return 0;
1518 }
1519 
/* Configure the MII-side interface according to phydev->interface,
 * after checking the requested mode against the abilities register.
 * Returns 0 or -EINVAL for unsupported modes.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u16 basic_config;
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Plain RGMII: no internal delays on either direction. */
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Fetch and validate the delays before programming them. */
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}

		basic_config = MII_BASIC_CONFIG_RMII;

		/* This is not PHY_INTERFACE_MODE_REVRMII */
		if (priv->flags & TJA11XX_REVERSE_MODE)
			basic_config |= MII_BASIC_CONFIG_REV;

		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      basic_config);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		/* Nothing to configure for the internal interface. */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1601 
/* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2
 *
 * Applies vendor-documented register write sequences on affected R-sample
 * silicon. The raw register addresses/values below come straight from the
 * errata sheet and must not be modified.
 */
static void nxp_c45_tja1120_errata(struct phy_device *phydev)
{
	bool macsec_ability, sgmii_ability;
	int silicon_version, sample_type;
	int phy_abilities;
	int ret = 0;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3);
	if (ret < 0)
		return;

	/* Only R-type samples are affected. */
	sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret);
	if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R)
		return;

	silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	sgmii_ability = !!(phy_abilities & SGMII_ABILITY);
	if ((!macsec_ability && silicon_version == 2) ||
	    (macsec_ability && silicon_version == 1)) {
		/* TJA1120/TJA1121 PHY configuration errata workaround.
		 * Apply PHY writes sequence before link up.
		 */
		if (!macsec_ability) {
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95);
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd);
		} else {
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7);
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893);
		}

		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0);

		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a);
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1);

		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0);

		if (sgmii_ability) {
			/* TJA1120B/TJA1121B SGMII PCS restart errata workaround.
			 * Put SGMII PCS into power down mode and back up.
			 */
			phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					 VEND1_SGMII_BASIC_CONTROL,
					 SGMII_LPM);
			phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					   VEND1_SGMII_BASIC_CONTROL,
					   SGMII_LPM);
		}
	}
}
1658 
1659 static int nxp_c45_config_init(struct phy_device *phydev)
1660 {
1661 	int ret;
1662 
1663 	ret = nxp_c45_config_enable(phydev);
1664 	if (ret) {
1665 		phydev_err(phydev, "Failed to enable config\n");
1666 		return ret;
1667 	}
1668 
1669 	/* Bug workaround for SJA1110 rev B: enable write access
1670 	 * to MDIO_MMD_PMAPMD
1671 	 */
1672 	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
1673 	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
1674 
1675 	if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
1676 		nxp_c45_tja1120_errata(phydev);
1677 
1678 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
1679 			 PHY_CONFIG_AUTO);
1680 
1681 	ret = nxp_c45_set_phy_mode(phydev);
1682 	if (ret)
1683 		return ret;
1684 
1685 	phydev->autoneg = AUTONEG_DISABLE;
1686 
1687 	nxp_c45_counters_enable(phydev);
1688 	nxp_c45_ptp_init(phydev);
1689 	ret = nxp_c45_macsec_config_init(phydev);
1690 	if (ret)
1691 		return ret;
1692 
1693 	return nxp_c45_start_op(phydev);
1694 }
1695 
1696 static int nxp_c45_get_features(struct phy_device *phydev)
1697 {
1698 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
1699 	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);
1700 
1701 	return genphy_c45_pma_read_abilities(phydev);
1702 }
1703 
1704 static int nxp_c45_parse_dt(struct phy_device *phydev)
1705 {
1706 	struct device_node *node = phydev->mdio.dev.of_node;
1707 	struct nxp_c45_phy *priv = phydev->priv;
1708 
1709 	if (!IS_ENABLED(CONFIG_OF_MDIO))
1710 		return 0;
1711 
1712 	if (of_property_read_bool(node, "nxp,rmii-refclk-out"))
1713 		priv->flags |= TJA11XX_REVERSE_MODE;
1714 
1715 	return 0;
1716 }
1717 
/* phy_driver::probe: allocate the driver state and, depending on the
 * abilities reported by the hardware and the kernel configuration,
 * register the PHY timestamper/PTP clock and MACsec support.
 */
static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	bool macsec_ability;
	int phy_abilities;
	bool ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	nxp_c45_parse_dt(phydev);

	mutex_init(&priv->ptp_lock);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	ptp_ability = !!(phy_abilities & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_dbg(phydev, "the phy does not support PTP");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		ret = nxp_c45_init_ptp_clock(priv);

		/* Timestamp selected by default to keep legacy API */
		phydev->default_timestamp = true;
	} else {
		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
	}

no_ptp_support:
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	if (!macsec_ability) {
		phydev_info(phydev, "the phy does not support MACsec\n");
		goto no_macsec_support;
	}

	if (IS_ENABLED(CONFIG_MACSEC)) {
		ret = nxp_c45_macsec_probe(phydev);
		phydev_dbg(phydev, "MACsec support enabled.");
	} else {
		phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
	}

no_macsec_support:

	return ret;
}
1782 
/* phy_driver::remove: unregister the PTP clock (if one was created),
 * drop any queued timestamping skbs and tear down MACsec state.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
	nxp_c45_macsec_remove(phydev);
}
1794 
/* Enable the TJA1103-specific preamble and IPG counters. */
static void tja1103_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);
}
1806 
/* TJA1103 PTP setup: select the RX timestamp insertion mode and enable
 * the PTP port function.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1814 
1815 static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
1816 {
1817 	if (enable)
1818 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1819 				   VEND1_PORT_PTP_CONTROL,
1820 				   PORT_PTP_CONTROL_BYPASS);
1821 	else
1822 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1823 				 VEND1_PORT_PTP_CONTROL,
1824 				 PORT_PTP_CONTROL_BYPASS);
1825 }
1826 
/* TJA1103 NMI handler: acknowledge a pending FUSA self-test pass event
 * (write-to-clear) and mark the IRQ as handled.
 */
static void tja1103_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_ALWAYS_ACCESSIBLE);
	if (ret & FUSA_PASS) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      VEND1_ALWAYS_ACCESSIBLE,
			      FUSA_PASS);
		*irq_status = IRQ_HANDLED;
	}
}
1841 
/* TJA1103/TJA1104 register layout: PTP/LTC, egress timestamp, external
 * trigger and cable-test registers (all MDIO_MMD_VEND1).
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period	= 0x1104,
	.vend1_event_msg_filt	= 0x1148,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0	= 0x1106,
	.vend1_ltc_wr_nsec_1	= 0x1107,
	.vend1_ltc_wr_sec_0	= 0x1108,
	.vend1_ltc_wr_sec_1	= 0x1109,
	.vend1_ltc_rd_nsec_0	= 0x110A,
	.vend1_ltc_rd_nsec_1	= 0x110B,
	.vend1_ltc_rd_sec_0	= 0x110C,
	.vend1_ltc_rd_sec_1	= 0x110D,
	.vend1_rate_adj_subns_0	= 0x110F,
	.vend1_rate_adj_subns_1	= 0x1110,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1121,
	.vend1_ext_trg_data_1	= 0x1122,
	.vend1_ext_trg_data_2	= 0x1123,
	.vend1_ext_trg_data_3	= 0x1124,
	.vend1_ext_trg_ctrl	= 0x1126,
	.cable_test		= 0x8330,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1894 
/* TJA1103/TJA1104 device description: regmap, extra counters and the
 * device-specific operations used by the common code above.
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	.ack_ptp_irq = false,
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.get_extts = nxp_c45_get_extts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
	.nmi_handler = tja1103_nmi_handler,
};
1909 
/* Enable the TJA1120-specific counters: extended symbol-error counter,
 * monitor reset, then frame/lost-frame counting.
 */
static void tja1120_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
			 EXTENDED_CNT_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
			 MONITOR_RESET);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
}
1919 
/* TJA1120 PTP setup: enable RX timestamp insertion, select the
 * insertion mode for external timestamps, and enable PTP.
 */
static void tja1120_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
		      TJA1120_TS_INSRT_MODE);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
			 PTP_ENABLE);
}
1929 
1930 static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
1931 {
1932 	if (enable)
1933 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1934 				 VEND1_PORT_FUNC_ENABLES,
1935 				 PTP_ENABLE);
1936 	else
1937 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1938 				   VEND1_PORT_FUNC_ENABLES,
1939 				   PTP_ENABLE);
1940 }
1941 
/* TJA1120 NMI handler: acknowledge a pending boot-done event and mark
 * the IRQ as handled.
 */
static void tja1120_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   TJA1120_GLOBAL_INFRA_IRQ_STATUS);
	if (ret & TJA1120_DEV_BOOT_DONE) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_GLOBAL_INFRA_IRQ_ACK,
			      TJA1120_DEV_BOOT_DONE);
		*irq_status = IRQ_HANDLED;
	}
}
1956 
1957 static int nxp_c45_macsec_ability(struct phy_device *phydev)
1958 {
1959 	bool macsec_ability;
1960 	int phy_abilities;
1961 
1962 	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1963 				     VEND1_PORT_ABILITIES);
1964 	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
1965 
1966 	return macsec_ability;
1967 }
1968 
1969 static int tja1103_match_phy_device(struct phy_device *phydev)
1970 {
1971 	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
1972 	       !nxp_c45_macsec_ability(phydev);
1973 }
1974 
1975 static int tja1104_match_phy_device(struct phy_device *phydev)
1976 {
1977 	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
1978 	       nxp_c45_macsec_ability(phydev);
1979 }
1980 
1981 static int tja1120_match_phy_device(struct phy_device *phydev)
1982 {
1983 	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
1984 	       !nxp_c45_macsec_ability(phydev);
1985 }
1986 
1987 static int tja1121_match_phy_device(struct phy_device *phydev)
1988 {
1989 	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
1990 	       nxp_c45_macsec_ability(phydev);
1991 }
1992 
/* TJA1120/TJA1121 register layout: PTP/LTC, egress timestamp, external
 * trigger and cable-test registers (all MDIO_MMD_VEND1).
 */
static const struct nxp_c45_regmap tja1120_regmap = {
	.vend1_ptp_clk_period	= 0x1020,
	.vend1_event_msg_filt	= 0x9010,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	.vend1_ltc_wr_nsec_0	= 0x1040,
	.vend1_ltc_wr_nsec_1	= 0x1041,
	.vend1_ltc_wr_sec_0	= 0x1042,
	.vend1_ltc_wr_sec_1	= 0x1043,
	.vend1_ltc_rd_nsec_0	= 0x1048,
	.vend1_ltc_rd_nsec_1	= 0x1049,
	.vend1_ltc_rd_sec_0	= 0x104A,
	.vend1_ltc_rd_sec_1	= 0x104B,
	.vend1_rate_adj_subns_0	= 0x1030,
	.vend1_rate_adj_subns_1	= 0x1031,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1071,
	.vend1_ext_trg_data_1	= 0x1072,
	.vend1_ext_trg_data_2	= 0x1073,
	.vend1_ext_trg_data_3	= 0x1074,
	.vend1_ext_trg_ctrl	= 0x1075,
	.cable_test		= 0x8360,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
2045 
/* Per-family driver data for TJA1120/TJA1121: register map, HW stats
 * table, and the TJA1120-specific PTP/IRQ callbacks wired into the
 * shared driver core via phy_driver.driver_data.
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	/* This family timestamps both edges of external trigger events */
	.ext_ts_both_edges = true,
	/* PTP IRQs on this family must be explicitly acknowledged */
	.ack_ptp_irq = true,
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.get_extts = tja1120_get_extts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
	.nmi_handler = tja1120_nmi_handler,
};
2060 
/* One phy_driver entry per marketed part. TJA1103/TJA1104 and
 * TJA1120/TJA1121 each share a PHY ID; the match_phy_device callbacks
 * disambiguate them by MACsec ability, and the pairs share driver_data.
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		/* TJA1103: non-MACsec variant of the 0x001BB010 ID */
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja1103_match_phy_device,
	},
	{
		/* TJA1104: MACsec-capable, same ID and ops as TJA1103 */
		.name			= "NXP C45 TJA1104",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja1104_match_phy_device,
	},
	{
		/* TJA1120: non-MACsec variant of the 0x001BB031 ID;
		 * adds link_change_notify over the TJA1103 entries
		 */
		.name			= "NXP C45 TJA1120",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja1120_match_phy_device,
	},
	{
		/* TJA1121: MACsec-capable, same ID and ops as TJA1120 */
		.name			= "NXP C45 TJA1121",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja1121_match_phy_device,
	},
};
2161 
/* Register the phy_driver array with the PHY core. */
module_phy_driver(nxp_c45_driver);

/* MDIO bus ID table for module autoloading; matching on model only,
 * since each ID covers two parts disambiguated at match time.
 */
static const struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");
2175