xref: /linux/drivers/net/phy/nxp-c45-tja11xx.c (revision 1cc3462159babb69c84c39cb1b4e262aef3ea325)
1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3  * Copyright 2021-2025 NXP
4  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/of.h>
14 #include <linux/phy.h>
15 #include <linux/processor.h>
16 #include <linux/property.h>
17 #include <linux/ptp_classify.h>
18 #include <linux/net_tstamp.h>
19 
20 #include "nxp-c45-tja11xx.h"
21 
22 #define PHY_ID_MASK			GENMASK(31, 4)
23 /* Same id: TJA1103, TJA1104 */
24 #define PHY_ID_TJA_1103			0x001BB010
25 /* Same id: TJA1120, TJA1121 */
26 #define PHY_ID_TJA_1120			0x001BB031
27 
28 #define VEND1_DEVICE_CONTROL		0x0040
29 #define DEVICE_CONTROL_RESET		BIT(15)
30 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
31 #define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)
32 
33 #define VEND1_DEVICE_CONFIG		0x0048
34 
35 #define TJA1120_VEND1_EXT_TS_MODE	0x1012
36 
37 #define TJA1120_GLOBAL_INFRA_IRQ_ACK	0x2C08
38 #define TJA1120_GLOBAL_INFRA_IRQ_EN	0x2C0A
39 #define TJA1120_GLOBAL_INFRA_IRQ_STATUS	0x2C0C
40 #define TJA1120_DEV_BOOT_DONE		BIT(1)
41 
42 #define TJA1120_VEND1_PTP_TRIG_DATA_S	0x1070
43 
44 #define TJA1120_EGRESS_TS_DATA_S	0x9060
45 #define TJA1120_EGRESS_TS_END		0x9067
46 #define TJA1120_TS_VALID		BIT(0)
47 #define TJA1120_MORE_TS			BIT(15)
48 
49 #define VEND1_PHY_IRQ_ACK		0x80A0
50 #define VEND1_PHY_IRQ_EN		0x80A1
51 #define VEND1_PHY_IRQ_STATUS		0x80A2
52 #define PHY_IRQ_LINK_EVENT		BIT(1)
53 
54 #define VEND1_ALWAYS_ACCESSIBLE		0x801F
55 #define FUSA_PASS			BIT(4)
56 
57 #define VEND1_PHY_CONTROL		0x8100
58 #define PHY_CONFIG_EN			BIT(14)
59 #define PHY_START_OP			BIT(0)
60 
61 #define VEND1_PHY_CONFIG		0x8108
62 #define PHY_CONFIG_AUTO			BIT(0)
63 
64 #define TJA1120_EPHY_RESETS		0x810A
65 #define EPHY_PCS_RESET			BIT(3)
66 
67 #define VEND1_SIGNAL_QUALITY		0x8320
68 #define SQI_VALID			BIT(14)
69 #define SQI_MASK			GENMASK(2, 0)
70 #define MAX_SQI				SQI_MASK
71 
72 #define CABLE_TEST_ENABLE		BIT(15)
73 #define CABLE_TEST_START		BIT(14)
74 #define CABLE_TEST_OK			0x00
75 #define CABLE_TEST_SHORTED		0x01
76 #define CABLE_TEST_OPEN			0x02
77 #define CABLE_TEST_UNKNOWN		0x07
78 
79 #define VEND1_PORT_CONTROL		0x8040
80 #define PORT_CONTROL_EN			BIT(14)
81 
82 #define VEND1_PORT_ABILITIES		0x8046
83 #define MACSEC_ABILITY			BIT(5)
84 #define PTP_ABILITY			BIT(3)
85 
86 #define VEND1_PORT_FUNC_IRQ_EN		0x807A
87 #define MACSEC_IRQS			BIT(5)
88 #define PTP_IRQS			BIT(3)
89 
90 #define VEND1_PTP_IRQ_ACK		0x9008
91 #define EGR_TS_IRQ			BIT(1)
92 
93 #define VEND1_PORT_INFRA_CONTROL	0xAC00
94 #define PORT_INFRA_CONTROL_EN		BIT(14)
95 
96 #define VEND1_RXID			0xAFCC
97 #define VEND1_TXID			0xAFCD
98 #define ID_ENABLE			BIT(15)
99 
100 #define VEND1_ABILITIES			0xAFC4
101 #define RGMII_ID_ABILITY		BIT(15)
102 #define RGMII_ABILITY			BIT(14)
103 #define RMII_ABILITY			BIT(10)
104 #define REVMII_ABILITY			BIT(9)
105 #define MII_ABILITY			BIT(8)
106 #define SGMII_ABILITY			BIT(0)
107 
108 #define VEND1_MII_BASIC_CONFIG		0xAFC6
109 #define MII_BASIC_CONFIG_REV		BIT(4)
110 #define MII_BASIC_CONFIG_SGMII		0x9
111 #define MII_BASIC_CONFIG_RGMII		0x7
112 #define MII_BASIC_CONFIG_RMII		0x5
113 #define MII_BASIC_CONFIG_MII		0x4
114 
115 #define VEND1_SYMBOL_ERROR_CNT_XTD	0x8351
116 #define EXTENDED_CNT_EN			BIT(15)
117 #define VEND1_MONITOR_STATUS		0xAC80
118 #define MONITOR_RESET			BIT(15)
119 #define VEND1_MONITOR_CONFIG		0xAC86
120 #define LOST_FRAMES_CNT_EN		BIT(9)
121 #define ALL_FRAMES_CNT_EN		BIT(8)
122 
123 #define VEND1_SYMBOL_ERROR_COUNTER	0x8350
124 #define VEND1_LINK_DROP_COUNTER		0x8352
125 #define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
126 #define VEND1_RX_PREAMBLE_COUNT		0xAFCE
127 #define VEND1_TX_PREAMBLE_COUNT		0xAFCF
128 #define VEND1_RX_IPG_LENGTH		0xAFD0
129 #define VEND1_TX_IPG_LENGTH		0xAFD1
130 #define COUNTER_EN			BIT(15)
131 
132 #define VEND1_PTP_CONFIG		0x1102
133 #define EXT_TRG_EDGE			BIT(1)
134 
135 #define TJA1120_SYNC_TRIG_FILTER	0x1010
136 #define PTP_TRIG_RISE_TS		BIT(3)
137 #define PTP_TRIG_FALLING_TS		BIT(2)
138 
139 #define CLK_RATE_ADJ_LD			BIT(15)
140 #define CLK_RATE_ADJ_DIR		BIT(14)
141 
142 #define VEND1_RX_TS_INSRT_CTRL		0x114D
143 #define TJA1103_RX_TS_INSRT_MODE2	0x02
144 
145 #define TJA1120_RX_TS_INSRT_CTRL	0x9012
146 #define TJA1120_RX_TS_INSRT_EN		BIT(15)
147 #define TJA1120_TS_INSRT_MODE		BIT(4)
148 
149 #define VEND1_EGR_RING_DATA_0		0x114E
150 #define VEND1_EGR_RING_CTRL		0x1154
151 
152 #define RING_DATA_0_TS_VALID		BIT(15)
153 
154 #define RING_DONE			BIT(0)
155 
156 #define TS_SEC_MASK			GENMASK(1, 0)
157 
158 #define PTP_ENABLE			BIT(3)
159 #define PHY_TEST_ENABLE			BIT(0)
160 
161 #define VEND1_PORT_PTP_CONTROL		0x9000
162 #define PORT_PTP_CONTROL_BYPASS		BIT(11)
163 
164 #define PTP_CLK_PERIOD_100BT1		15ULL
165 #define PTP_CLK_PERIOD_1000BT1		8ULL
166 
167 #define EVENT_MSG_FILT_ALL		0x0F
168 #define EVENT_MSG_FILT_NONE		0x00
169 
170 #define VEND1_GPIO_FUNC_CONFIG_BASE	0x2C40
171 #define GPIO_FUNC_EN			BIT(15)
172 #define GPIO_FUNC_PTP			BIT(6)
173 #define GPIO_SIGNAL_PTP_TRIGGER		0x01
174 #define GPIO_SIGNAL_PPS_OUT		0x12
175 #define GPIO_DISABLE			0
176 #define GPIO_PPS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
177 	GPIO_SIGNAL_PPS_OUT)
178 #define GPIO_EXTTS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
179 	GPIO_SIGNAL_PTP_TRIGGER)
180 
181 #define RGMII_PERIOD_PS			8000U
182 #define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
183 #define MIN_ID_PS			1644U
184 #define MAX_ID_PS			2260U
185 #define DEFAULT_ID_PS			2000U
186 
187 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
188 	(ppb) * (ptp_clk_period), NSEC_PER_SEC)
189 
190 #define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)
191 
192 #define TJA11XX_REVERSE_MODE		BIT(0)
193 
194 struct nxp_c45_phy;
195 
/* Per-skb state stashed in skb->cb while a PTP packet waits for its
 * hardware timestamp.
 */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* parsed PTP header inside the skb */
	unsigned int type;		/* packet type as passed to the
					 * timestamping hooks; used with
					 * ptp_get_msgtype()
					 */
};
200 
/* Compound-literal initializer for a struct nxp_c45_reg_field. */
#define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size)	\
	((struct nxp_c45_reg_field) {			\
		.reg = _reg,				\
		.devad =  _devad,			\
		.offset = _offset,			\
		.size = _size,				\
	})
208 
/* Location of a bit field inside an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* bit offset of the field's LSB */
	u8 size;	/* field width in bits */
};
215 
/* Hardware timestamp read back from the PHY, together with the PTP
 * header fields used to match it against a queued skb.
 */
struct nxp_c45_hwts {
	u32	nsec;		/* nanoseconds part */
	u32	sec;		/* truncated seconds; widened later by
				 * nxp_c45_reconstruct_ts()
				 */
	u8	domain_number;
	u16	sequence_id;
	u8	msg_type;
};
223 
/* Chip-specific (TJA1103 vs. TJA1120) register addresses and bit-field
 * locations consumed by the common driver paths.
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;
	struct nxp_c45_reg_field ltc_write;
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;
	struct nxp_c45_reg_field sec_4_2;
	struct nxp_c45_reg_field nsec_15_0;
	struct nxp_c45_reg_field nsec_29_16;

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
269 
/* One ethtool statistics entry: counter name and its register field. */
struct nxp_c45_phy_stats {
	const char	*name;
	const struct nxp_c45_reg_field counter;
};
274 
/* Per-chip driver data: register map, statistics table and the
 * chip-specific PTP hooks.
 */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;
	const struct nxp_c45_phy_stats *stats;
	int n_stats;
	u8 ptp_clk_period;	/* see PTP_CLK_PERIOD_* */
	bool ext_ts_both_edges;	/* HW can timestamp both signal edges */
	bool ack_ptp_irq;
	void (*counters_enable)(struct phy_device *phydev);
	/* Pop one egress (TX) timestamp; returns false if none valid. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	/* Read the external trigger timestamp; returns false if none valid. */
	bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
	void (*nmi_handler)(struct phy_device *phydev,
			    irqreturn_t *irq_status);
};
291 
/* Return the chip-specific data attached to the matched driver entry. */
static const
struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
{
	return phydev->drv->driver_data;
}
297 
298 static const
299 struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
300 {
301 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
302 
303 	return phy_data->regmap;
304 }
305 
306 static int nxp_c45_read_reg_field(struct phy_device *phydev,
307 				  const struct nxp_c45_reg_field *reg_field)
308 {
309 	u16 mask;
310 	int ret;
311 
312 	if (reg_field->size == 0) {
313 		phydev_err(phydev, "Trying to read a reg field of size 0.\n");
314 		return -EINVAL;
315 	}
316 
317 	ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
318 	if (ret < 0)
319 		return ret;
320 
321 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
322 		GENMASK(reg_field->offset + reg_field->size - 1,
323 			reg_field->offset);
324 	ret &= mask;
325 	ret >>= reg_field->offset;
326 
327 	return ret;
328 }
329 
330 static int nxp_c45_write_reg_field(struct phy_device *phydev,
331 				   const struct nxp_c45_reg_field *reg_field,
332 				   u16 val)
333 {
334 	u16 mask;
335 	u16 set;
336 
337 	if (reg_field->size == 0) {
338 		phydev_err(phydev, "Trying to write a reg field of size 0.\n");
339 		return -EINVAL;
340 	}
341 
342 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
343 		GENMASK(reg_field->offset + reg_field->size - 1,
344 			reg_field->offset);
345 	set = val << reg_field->offset;
346 
347 	return phy_modify_mmd_changed(phydev, reg_field->devad,
348 				      reg_field->reg, mask, set);
349 }
350 
/* Set (write 1 to) a single-bit register field. */
static int nxp_c45_set_reg_field(struct phy_device *phydev,
				 const struct nxp_c45_reg_field *reg_field)
{
	if (reg_field->size != 1) {
		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
		return -EINVAL;
	}

	return nxp_c45_write_reg_field(phydev, reg_field, 1);
}
361 
362 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
363 				   const struct nxp_c45_reg_field *reg_field)
364 {
365 	if (reg_field->size != 1) {
366 		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
367 		return -EINVAL;
368 	}
369 
370 	return nxp_c45_write_reg_field(phydev, reg_field, 0);
371 }
372 
/* True when the PHY has no usable interrupt line, in which case egress
 * timestamps must be polled from the PTP aux worker instead of being
 * delivered by IRQ.
 */
static bool nxp_c45_poll_txts(struct phy_device *phydev)
{
	return phydev->irq <= 0;
}
377 
/* Read the LTC (local time counter). Lockless variant: callers hold
 * priv->ptp_lock. @sts is part of the gettimex64 signature but is not
 * filled in here.
 */
static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	/* Trigger an LTC read snapshot, then read nsec and sec as two
	 * 16-bit halves each.
	 */
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_nsec_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    regmap->vend1_ltc_rd_nsec_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  regmap->vend1_ltc_rd_sec_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_sec_1) << 16;

	return 0;
}
397 
/* ptp_clock_info::gettimex64 - read the PHC time under ptp_lock. */
static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
410 
/* Program a new LTC value. Lockless variant: callers hold
 * priv->ptp_lock. The four 16-bit halves are staged first and then
 * committed by the ltc_write trigger.
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
		      ts->tv_sec >> 16);
	/* Commit the staged value into the running clock. */
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);

	return 0;
}
429 
/* ptp_clock_info::settime64 - set the PHC time under ptp_lock. */
static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_settime64(ptp, ts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
441 
/* ptp_clock_info::adjfine - adjust the PHC frequency. The signed
 * scaled-ppm value is split into a direction flag and a magnitude that
 * is converted to the hardware's sub-ns increment via
 * PPM_TO_SUBNS_INC().
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	/* Low 16 bits first; the high half is written last together with
	 * the load (CLK_RATE_ADJ_LD) and direction (CLK_RATE_ADJ_DIR)
	 * bits.
	 */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
472 
/* ptp_clock_info::adjtime - shift the PHC by @delta ns using a
 * read-modify-write of the LTC; the whole sequence runs under ptp_lock
 * so it is atomic with respect to the other clock operations.
 */
static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	struct timespec64 now, then;

	mutex_lock(&priv->ptp_lock);
	then = ns_to_timespec64(delta);
	_nxp_c45_ptp_gettimex64(ptp, &now, NULL);
	now = timespec64_add(now, then);
	_nxp_c45_ptp_settime64(ptp, &now);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
487 
/* Merge a truncated hardware timestamp into the full current time @ts.
 * The hardware supplies full nanoseconds but only the low TS_SEC_MASK
 * seconds bits, so splice those bits into the current seconds value.
 */
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	/* If the current low seconds bits are behind the captured ones,
	 * the counter wrapped after the capture: step back one period.
	 */
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
497 
498 static bool nxp_c45_match_ts(struct ptp_header *header,
499 			     struct nxp_c45_hwts *hwts,
500 			     unsigned int type)
501 {
502 	return ntohs(header->sequence_id) == hwts->sequence_id &&
503 	       ptp_get_msgtype(header, type) == hwts->msg_type &&
504 	       header->domain_number  == hwts->domain_number;
505 }
506 
/* Read the latched external trigger timestamp (nsec and sec as two
 * 16-bit halves each), then write RING_DONE to release the slot.
 * Always reports success; the TJA1120 path wraps this with a validity
 * check (see tja1120_get_extts()).
 */
static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       regmap->vend1_ext_trg_data_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_3) << 16;
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_ext_trg_ctrl, RING_DONE);

	return true;
}
525 
526 static bool tja1120_extts_is_valid(struct phy_device *phydev)
527 {
528 	bool valid;
529 	int reg;
530 
531 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
532 			   TJA1120_VEND1_PTP_TRIG_DATA_S);
533 	valid = !!(reg & TJA1120_TS_VALID);
534 
535 	return valid;
536 }
537 
/* TJA1120 variant of the extts read: only report a timestamp when the
 * valid flag is set, working around an engineering-sample quirk where
 * a valid entry may still sit in the FIFO behind an invalid buffer.
 */
static bool tja1120_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   regmap->vend1_ext_trg_ctrl);
	more_ts = !!(reg & TJA1120_MORE_TS);

	valid = tja1120_extts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_extts_out;

		/* Bug workaround for TJA1120 engineering samples: move the new
		 * timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      regmap->vend1_ext_trg_ctrl, RING_DONE);
		valid = tja1120_extts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_extts_out;
	}

	nxp_c45_get_extts(priv, extts);
tja1120_get_extts_out:
	return valid;
}
570 
/* Assemble an egress timestamp from the per-field egress TS registers.
 * Seconds come in only 5 bits (sec_1_0 + sec_4_2); the full value is
 * reconstructed later by nxp_c45_reconstruct_ts().
 */
static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
				   struct nxp_c45_hwts *hwts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;

	hwts->domain_number =
		nxp_c45_read_reg_field(phydev, &regmap->domain_number);
	hwts->msg_type =
		nxp_c45_read_reg_field(phydev, &regmap->msg_type);
	hwts->sequence_id =
		nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
	hwts->nsec =
		nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
	hwts->nsec |=
		nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
	hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
	hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;
}
590 
/* Pop one egress timestamp from the TJA1103 egress ring. Returns false
 * when no valid timestamp is available. Serialized by ptp_lock.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	/* Advance the ring, then check whether the exposed entry is valid. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	nxp_c45_read_egress_ts(priv, hwts);
nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
610 
611 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
612 {
613 	bool valid;
614 	u16 reg;
615 
616 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
617 	valid = !!(reg & TJA1120_TS_VALID);
618 
619 	return valid;
620 }
621 
/* Pop one egress timestamp on TJA1120, clearing its valid flag after a
 * successful read and working around the engineering-sample FIFO quirk
 * (see the inline comment). Serialized by ptp_lock.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	/* Consume the entry so the next read sees fresh data. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
654 
/* Find the queued TX skb matching @txts (by sequence id, message type
 * and domain number), reconstruct the full timestamp and complete the
 * skb; a timestamp with no matching skb is only warned about.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	/* Walk and unlink under the queue lock; deliver outside it. */
	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* The hardware timestamp is truncated; widen it using the
		 * current clock time.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
689 
/* PTP aux worker: drains egress timestamps when polling (no IRQ line),
 * attaches reconstructed RX timestamps to queued skbs, and polls the
 * external trigger timestamp while extts is enabled. The return value
 * is the reschedule delay for the PTP worker (1 jiffy), or -1 to stop
 * rescheduling — NOTE(review): per ptp_clock_info::do_aux_work
 * semantics; confirm against the PTP core documentation.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool ts_valid;
	u32 ts_raw;

	/* TX path: match every available egress timestamp to its skb. */
	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		ts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!ts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	/* RX path: the PHY stuffed a truncated timestamp (2 bits sec,
	 * 30 bits nsec) into the PTP header's reserved2 field; widen it
	 * against the current clock and hand the skb to the stack.
	 */
	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	/* EXTTS path: report a clock event only when a new (different)
	 * timestamp was captured.
	 */
	if (priv->extts) {
		ts_valid = data->get_extts(priv, &ts);
		if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
741 
742 static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
743 				int pin, u16 pin_cfg)
744 {
745 	struct phy_device *phydev = priv->phydev;
746 
747 	phy_write_mmd(phydev, MDIO_MMD_VEND1,
748 		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
749 }
750 
751 static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
752 				 struct ptp_perout_request *perout, int on)
753 {
754 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
755 	struct phy_device *phydev = priv->phydev;
756 	int pin;
757 
758 	if (perout->flags & ~PTP_PEROUT_PHASE)
759 		return -EOPNOTSUPP;
760 
761 	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
762 	if (pin < 0)
763 		return pin;
764 
765 	if (!on) {
766 		nxp_c45_clear_reg_field(priv->phydev,
767 					&regmap->pps_enable);
768 		nxp_c45_clear_reg_field(priv->phydev,
769 					&regmap->pps_polarity);
770 
771 		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
772 
773 		return 0;
774 	}
775 
776 	/* The PPS signal is fixed to 1 second and is always generated when the
777 	 * seconds counter is incremented. The start time is not configurable.
778 	 * If the clock is adjusted, the PPS signal is automatically readjusted.
779 	 */
780 	if (perout->period.sec != 1 || perout->period.nsec != 0) {
781 		phydev_warn(phydev, "The period can be set only to 1 second.");
782 		return -EINVAL;
783 	}
784 
785 	if (!(perout->flags & PTP_PEROUT_PHASE)) {
786 		if (perout->start.sec != 0 || perout->start.nsec != 0) {
787 			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
788 			return -EINVAL;
789 		}
790 	} else {
791 		if (perout->phase.nsec != 0 &&
792 		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
793 			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
794 			return -EINVAL;
795 		}
796 
797 		if (perout->phase.nsec == 0)
798 			nxp_c45_clear_reg_field(priv->phydev,
799 						&regmap->pps_polarity);
800 		else
801 			nxp_c45_set_reg_field(priv->phydev,
802 					      &regmap->pps_polarity);
803 	}
804 
805 	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);
806 
807 	nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);
808 
809 	return 0;
810 }
811 
/* TJA1103: a single EXT_TRG_EDGE bit selects the sampled edge, so
 * rising and falling are mutually exclusive (both-edges requests are
 * rejected earlier in nxp_c45_extts_enable()).
 */
static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
					  struct ptp_extts_request *extts)
{
	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
}
823 
/* TJA1120: rising and falling edge timestamping have independent
 * enable bits, so both edges can be captured at the same time.
 */
static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
					   struct ptp_extts_request *extts)
{
	/* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
	 * this case external ts will be enabled on rising edge.
	 */
	if (extts->flags & PTP_RISING_EDGE ||
	    extts->flags == PTP_ENABLE_FEATURE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_SYNC_TRIG_FILTER,
				 PTP_TRIG_RISE_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_SYNC_TRIG_FILTER,
				   PTP_TRIG_RISE_TS);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_SYNC_TRIG_FILTER,
				 PTP_TRIG_FALLING_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_SYNC_TRIG_FILTER,
				   PTP_TRIG_FALLING_TS);
}
849 
/* ptp_clock_info::enable(EXTTS) - enable/disable external timestamping
 * on the GPIO pin assigned to PTP_PF_EXTTS. Captured timestamps are
 * polled from nxp_c45_do_aux_work(), hence the worker kick-off below.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			      PTP_RISING_EDGE |
			      PTP_FALLING_EDGE |
			      PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE) &&
	    !data->ext_ts_both_edges)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	/* Pick the edge configuration style the chip supports. */
	if (data->ext_ts_both_edges)
		nxp_c45_set_rising_and_falling(priv->phydev, extts);
	else
		nxp_c45_set_rising_or_falling(priv->phydev, extts);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Start polling for trigger timestamps right away. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
891 
892 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
893 			      struct ptp_clock_request *req, int on)
894 {
895 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
896 
897 	switch (req->type) {
898 	case PTP_CLK_REQ_EXTTS:
899 		return nxp_c45_extts_enable(priv, &req->extts, on);
900 	case PTP_CLK_REQ_PEROUT:
901 		return nxp_c45_perout_enable(priv, &req->perout, on);
902 	default:
903 		return -EOPNOTSUPP;
904 	}
905 }
906 
/* All 12 GPIOs start unassigned; each may be claimed for PEROUT or
 * EXTTS (see nxp_c45_ptp_verify_pin()).
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
921 
922 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
923 				  enum ptp_pin_function func, unsigned int chan)
924 {
925 	if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
926 		return -EINVAL;
927 
928 	switch (func) {
929 	case PTP_PF_NONE:
930 	case PTP_PF_PEROUT:
931 	case PTP_PF_EXTTS:
932 		break;
933 	default:
934 		return -EOPNOTSUPP;
935 	}
936 
937 	return 0;
938 }
939 
/* Fill in the ptp_clock_info callbacks and register the PTP hardware
 * clock. Returns 0 on success or a negative errno.
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	/* NOTE(review): ptp_clock_register() returning NULL (e.g. PHC
	 * support not built in) is mapped to -ENOMEM here — confirm the
	 * intended errno against the PTP core API.
	 */
	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
970 
/* mii_timestamper::txtstamp - when TX timestamping is enabled, stash
 * the parsed PTP header in skb->cb and queue the skb until its egress
 * timestamp is retrieved (by polling or IRQ); otherwise free the skb.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		/* Without an IRQ line, kick the worker to poll for the
		 * egress timestamp.
		 */
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
992 
993 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
994 			     struct sk_buff *skb, int type)
995 {
996 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
997 						mii_ts);
998 	struct ptp_header *header = ptp_parse_header(skb, type);
999 
1000 	if (!header)
1001 		return false;
1002 
1003 	if (!priv->hwts_rx)
1004 		return false;
1005 
1006 	NXP_C45_SKB_CB(skb)->header = header;
1007 	skb_queue_tail(&priv->rx_queue, skb);
1008 	ptp_schedule_worker(priv->ptp_clock, 0);
1009 
1010 	return true;
1011 }
1012 
/* mii_timestamper::hwtstamp - apply a new hardware timestamping
 * configuration: record TX mode and RX filter, (de)activate the PTP
 * block and event message filter, and, when an IRQ line is in use,
 * toggle the egress timestamp interrupt accordingly.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	const struct nxp_c45_phy_data *data;

	if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	data = nxp_c45_get_data(phydev);
	priv->hwts_tx = cfg->tx_type;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		priv->hwts_rx = 1;
		/* Report the broadest L2 filter actually applied. */
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	/* PTP block and message filter are needed for either direction. */
	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_ALL);
		data->ptp_enable(phydev, true);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_NONE);
		data->ptp_enable(phydev, false);
	}

	/* Without an IRQ line the worker polls for egress timestamps, so
	 * the egress timestamp interrupt is left untouched.
	 */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
	else
		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);

nxp_c45_no_ptp_irq:
	return 0;
}
1065 
1066 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
1067 			   struct kernel_ethtool_ts_info *ts_info)
1068 {
1069 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1070 						mii_ts);
1071 
1072 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1073 			SOF_TIMESTAMPING_RX_HARDWARE |
1074 			SOF_TIMESTAMPING_RAW_HARDWARE;
1075 	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1076 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1077 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1078 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1079 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1080 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
1081 
1082 	return 0;
1083 }
1084 
/* Counters shared by all supported variants: each entry maps an ethtool
 * statistic name to its MMD VEND1 register bit-field.
 */
static const struct nxp_c45_phy_stats common_hw_stats[] = {
	{ "phy_link_status_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
	{ "phy_link_availability_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
	{ "phy_link_loss_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
	{ "phy_link_failure_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
	{ "phy_symbol_error_cnt",
		NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
};
1097 
/* Counters specific to the TJA1103 register layout (also used by
 * TJA1104, which shares the same phy_data).
 */
static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
	{ "rx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
	{ "tx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
	{ "rx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
	{ "tx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
};
1108 
/* Counters specific to the TJA1120 register layout; the *_xtd entries
 * hold the extended (upper) part of the corresponding counter.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
		NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
		NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
		NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
		NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
		NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1129 
1130 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1131 {
1132 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1133 
1134 	return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1135 }
1136 
1137 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
1138 {
1139 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1140 	size_t count = nxp_c45_get_sset_count(phydev);
1141 	size_t idx;
1142 	size_t i;
1143 
1144 	for (i = 0; i < count; i++) {
1145 		if (i < ARRAY_SIZE(common_hw_stats)) {
1146 			ethtool_puts(&data, common_hw_stats[i].name);
1147 			continue;
1148 		}
1149 		idx = i - ARRAY_SIZE(common_hw_stats);
1150 		ethtool_puts(&data, phy_data->stats[idx].name);
1151 	}
1152 }
1153 
1154 static void nxp_c45_get_stats(struct phy_device *phydev,
1155 			      struct ethtool_stats *stats, u64 *data)
1156 {
1157 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1158 	size_t count = nxp_c45_get_sset_count(phydev);
1159 	const struct nxp_c45_reg_field *reg_field;
1160 	size_t idx;
1161 	size_t i;
1162 	int ret;
1163 
1164 	for (i = 0; i < count; i++) {
1165 		if (i < ARRAY_SIZE(common_hw_stats)) {
1166 			reg_field = &common_hw_stats[i].counter;
1167 		} else {
1168 			idx = i - ARRAY_SIZE(common_hw_stats);
1169 			reg_field = &phy_data->stats[idx].counter;
1170 		}
1171 
1172 		ret = nxp_c45_read_reg_field(phydev, reg_field);
1173 		if (ret < 0)
1174 			data[i] = U64_MAX;
1175 		else
1176 			data[i] = ret;
1177 	}
1178 }
1179 
/* Enable configuration of the device, port and port-infrastructure
 * blocks so the subsequent setup writes take effect.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	/* Give the device time to apply the configuration enable. */
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
1196 
/* Leave configuration state and start normal PHY operation. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1202 
1203 static int nxp_c45_config_intr(struct phy_device *phydev)
1204 {
1205 	int ret;
1206 
1207 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1208 		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1209 				       VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1210 		if (ret)
1211 			return ret;
1212 
1213 		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1214 					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1215 	}
1216 
1217 	ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1218 				 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1219 	if (ret)
1220 		return ret;
1221 
1222 	return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1223 				  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1224 }
1225 
1226 static int tja1103_config_intr(struct phy_device *phydev)
1227 {
1228 	int ret;
1229 
1230 	/* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
1231 	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
1232 			    FUSA_PASS);
1233 	if (ret)
1234 		return ret;
1235 
1236 	return nxp_c45_config_intr(phydev);
1237 }
1238 
1239 static int tja1120_config_intr(struct phy_device *phydev)
1240 {
1241 	int ret;
1242 
1243 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1244 		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1245 				       TJA1120_GLOBAL_INFRA_IRQ_EN,
1246 				       TJA1120_DEV_BOOT_DONE);
1247 	else
1248 		ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1249 					 TJA1120_GLOBAL_INFRA_IRQ_EN,
1250 					 TJA1120_DEV_BOOT_DONE);
1251 	if (ret)
1252 		return ret;
1253 
1254 	return nxp_c45_config_intr(phydev);
1255 }
1256 
/* Shared IRQ handler: services link events, egress timestamps,
 * device-specific NMIs and MACsec events in that order.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	/* Link up/down: ack the event and let phylib re-read the state. */
	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp.
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		/* Drain all pending egress timestamps and pair them with
		 * the queued TX skbs.
		 */
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	data->nmi_handler(phydev, &ret);
	nxp_c45_handle_macsec_interrupt(phydev, &ret);

	return ret;
}
1293 
/* Issue a device soft reset and wait for the self-clearing reset bit.
 * Returns 0 on success or a negative error from the write or the poll
 * helper (e.g. -ETIMEDOUT).
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	/* The device needs some time before it is accessible again. */
	usleep_range(2000, 2050);

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1310 
1311 static int nxp_c45_cable_test_start(struct phy_device *phydev)
1312 {
1313 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1314 
1315 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1316 			 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1317 	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1318 				CABLE_TEST_ENABLE | CABLE_TEST_START);
1319 }
1320 
1321 static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
1322 					 bool *finished)
1323 {
1324 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1325 	int ret;
1326 	u8 cable_test_result;
1327 
1328 	ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
1329 	if (!ret) {
1330 		*finished = false;
1331 		return 0;
1332 	}
1333 
1334 	*finished = true;
1335 	cable_test_result = nxp_c45_read_reg_field(phydev,
1336 						   &regmap->cable_test_result);
1337 
1338 	switch (cable_test_result) {
1339 	case CABLE_TEST_OK:
1340 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1341 					ETHTOOL_A_CABLE_RESULT_CODE_OK);
1342 		break;
1343 	case CABLE_TEST_SHORTED:
1344 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1345 					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
1346 		break;
1347 	case CABLE_TEST_OPEN:
1348 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1349 					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
1350 		break;
1351 	default:
1352 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1353 					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
1354 	}
1355 
1356 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1357 			   CABLE_TEST_ENABLE);
1358 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1359 			   VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1360 
1361 	return nxp_c45_start_op(phydev);
1362 }
1363 
1364 static int nxp_c45_get_sqi(struct phy_device *phydev)
1365 {
1366 	int reg;
1367 
1368 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1369 	if (!(reg & SQI_VALID))
1370 		return -EINVAL;
1371 
1372 	reg &= SQI_MASK;
1373 
1374 	return reg;
1375 }
1376 
/* Bug workaround for TJA1120 engineering samples: fix egress
 * timestamps lost after link recovery by pulsing the PCS reset on
 * every link-down transition.
 */
static void tja1120_link_change_notify(struct phy_device *phydev)
{
	if (phydev->state == PHY_NOLINK) {
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
	}
}
1389 
/* ethtool .get_sqi_max: highest SQI value the PHY reports. */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1394 
1395 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1396 {
1397 	if (delay < MIN_ID_PS) {
1398 		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1399 		return -EINVAL;
1400 	}
1401 
1402 	if (delay > MAX_ID_PS) {
1403 		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1404 		return -EINVAL;
1405 	}
1406 
1407 	return 0;
1408 }
1409 
/* Enable the link-drop counter common to all variants, then the
 * device-specific counters.
 */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
1419 
/* Common PTP init: program the LTC clock period, clear the LTC lock
 * control bit and run the device-specific PTP setup.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1431 
1432 static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1433 {
1434 	/* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1435 	 * To avoid floating point operations we'll multiply by 10
1436 	 * and get 1 decimal point precision.
1437 	 */
1438 	phase_offset_raw *= 10;
1439 	phase_offset_raw -= 738;
1440 	return div_u64(phase_offset_raw, 9);
1441 }
1442 
/* Turn off both the TX and RX RGMII internal delays. */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
1448 
1449 static void nxp_c45_set_delays(struct phy_device *phydev)
1450 {
1451 	struct nxp_c45_phy *priv = phydev->priv;
1452 	u64 tx_delay = priv->tx_delay;
1453 	u64 rx_delay = priv->rx_delay;
1454 	u64 degree;
1455 
1456 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1457 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1458 		degree = div_u64(tx_delay, PS_PER_DEGREE);
1459 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1460 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1461 	} else {
1462 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1463 				   ID_ENABLE);
1464 	}
1465 
1466 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1467 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1468 		degree = div_u64(rx_delay, PS_PER_DEGREE);
1469 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1470 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1471 	} else {
1472 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1473 				   ID_ENABLE);
1474 	}
1475 }
1476 
1477 static int nxp_c45_get_delays(struct phy_device *phydev)
1478 {
1479 	struct nxp_c45_phy *priv = phydev->priv;
1480 	int ret;
1481 
1482 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1483 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1484 		ret = device_property_read_u32(&phydev->mdio.dev,
1485 					       "tx-internal-delay-ps",
1486 					       &priv->tx_delay);
1487 		if (ret)
1488 			priv->tx_delay = DEFAULT_ID_PS;
1489 
1490 		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1491 		if (ret) {
1492 			phydev_err(phydev,
1493 				   "tx-internal-delay-ps invalid value\n");
1494 			return ret;
1495 		}
1496 	}
1497 
1498 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1499 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1500 		ret = device_property_read_u32(&phydev->mdio.dev,
1501 					       "rx-internal-delay-ps",
1502 					       &priv->rx_delay);
1503 		if (ret)
1504 			priv->rx_delay = DEFAULT_ID_PS;
1505 
1506 		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1507 		if (ret) {
1508 			phydev_err(phydev,
1509 				   "rx-internal-delay-ps invalid value\n");
1510 			return ret;
1511 		}
1512 	}
1513 
1514 	return 0;
1515 }
1516 
1517 static int nxp_c45_set_phy_mode(struct phy_device *phydev)
1518 {
1519 	struct nxp_c45_phy *priv = phydev->priv;
1520 	u16 basic_config;
1521 	int ret;
1522 
1523 	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
1524 	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);
1525 
1526 	switch (phydev->interface) {
1527 	case PHY_INTERFACE_MODE_RGMII:
1528 		if (!(ret & RGMII_ABILITY)) {
1529 			phydev_err(phydev, "rgmii mode not supported\n");
1530 			return -EINVAL;
1531 		}
1532 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1533 			      MII_BASIC_CONFIG_RGMII);
1534 		nxp_c45_disable_delays(phydev);
1535 		break;
1536 	case PHY_INTERFACE_MODE_RGMII_ID:
1537 	case PHY_INTERFACE_MODE_RGMII_TXID:
1538 	case PHY_INTERFACE_MODE_RGMII_RXID:
1539 		if (!(ret & RGMII_ID_ABILITY)) {
1540 			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
1541 			return -EINVAL;
1542 		}
1543 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1544 			      MII_BASIC_CONFIG_RGMII);
1545 		ret = nxp_c45_get_delays(phydev);
1546 		if (ret)
1547 			return ret;
1548 
1549 		nxp_c45_set_delays(phydev);
1550 		break;
1551 	case PHY_INTERFACE_MODE_MII:
1552 		if (!(ret & MII_ABILITY)) {
1553 			phydev_err(phydev, "mii mode not supported\n");
1554 			return -EINVAL;
1555 		}
1556 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1557 			      MII_BASIC_CONFIG_MII);
1558 		break;
1559 	case PHY_INTERFACE_MODE_REVMII:
1560 		if (!(ret & REVMII_ABILITY)) {
1561 			phydev_err(phydev, "rev-mii mode not supported\n");
1562 			return -EINVAL;
1563 		}
1564 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1565 			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
1566 		break;
1567 	case PHY_INTERFACE_MODE_RMII:
1568 		if (!(ret & RMII_ABILITY)) {
1569 			phydev_err(phydev, "rmii mode not supported\n");
1570 			return -EINVAL;
1571 		}
1572 
1573 		basic_config = MII_BASIC_CONFIG_RMII;
1574 
1575 		/* This is not PHY_INTERFACE_MODE_REVRMII */
1576 		if (priv->flags & TJA11XX_REVERSE_MODE)
1577 			basic_config |= MII_BASIC_CONFIG_REV;
1578 
1579 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1580 			      basic_config);
1581 		break;
1582 	case PHY_INTERFACE_MODE_SGMII:
1583 		if (!(ret & SGMII_ABILITY)) {
1584 			phydev_err(phydev, "sgmii mode not supported\n");
1585 			return -EINVAL;
1586 		}
1587 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1588 			      MII_BASIC_CONFIG_SGMII);
1589 		break;
1590 	case PHY_INTERFACE_MODE_INTERNAL:
1591 		break;
1592 	default:
1593 		return -EINVAL;
1594 	}
1595 
1596 	return 0;
1597 }
1598 
/* .config_init: unlock configuration, apply workarounds, select the
 * MAC interface mode and bring up counters, PTP and MACsec before
 * starting normal operation.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* The driver does not use autonegotiation; mark it disabled for
	 * phylib.
	 */
	phydev->autoneg = AUTONEG_DISABLE;

	nxp_c45_counters_enable(phydev);
	nxp_c45_ptp_init(phydev);
	ret = nxp_c45_macsec_config_init(phydev);
	if (ret)
		return ret;

	/* Leave configuration state and start normal operation. */
	return nxp_c45_start_op(phydev);
}
1632 
1633 static int nxp_c45_get_features(struct phy_device *phydev)
1634 {
1635 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
1636 	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);
1637 
1638 	return genphy_c45_pma_read_abilities(phydev);
1639 }
1640 
1641 static int nxp_c45_parse_dt(struct phy_device *phydev)
1642 {
1643 	struct device_node *node = phydev->mdio.dev.of_node;
1644 	struct nxp_c45_phy *priv = phydev->priv;
1645 
1646 	if (!IS_ENABLED(CONFIG_OF_MDIO))
1647 		return 0;
1648 
1649 	if (of_property_read_bool(node, "nxp,rmii-refclk-out"))
1650 		priv->flags |= TJA11XX_REVERSE_MODE;
1651 
1652 	return 0;
1653 }
1654 
/* .probe: allocate per-PHY state and, when both the hardware abilities
 * and the kernel configuration allow it, set up PHY timestamping, the
 * PTP clock and MACsec offload.
 */
static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	bool macsec_ability;
	int phy_abilities;
	bool ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	nxp_c45_parse_dt(phydev);

	mutex_init(&priv->ptp_lock);

	/* The abilities register tells which optional blocks (PTP,
	 * MACsec) this part actually has.
	 */
	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	ptp_ability = !!(phy_abilities & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_dbg(phydev, "the phy does not support PTP");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		/* NOTE(review): an error here is only returned at the
		 * end of probe and may be overwritten by
		 * nxp_c45_macsec_probe() below — confirm this is
		 * intentional.
		 */
		ret = nxp_c45_init_ptp_clock(priv);

		/* Timestamp selected by default to keep legacy API */
		phydev->default_timestamp = true;
	} else {
		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
	}

no_ptp_support:
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	if (!macsec_ability) {
		phydev_info(phydev, "the phy does not support MACsec\n");
		goto no_macsec_support;
	}

	if (IS_ENABLED(CONFIG_MACSEC)) {
		ret = nxp_c45_macsec_probe(phydev);
		phydev_dbg(phydev, "MACsec support enabled.");
	} else {
		phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
	}

no_macsec_support:

	return ret;
}
1719 
/* .remove: undo probe — unregister the PTP clock (if it was created),
 * drop skbs still waiting for timestamps and tear down MACsec state.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
	nxp_c45_macsec_remove(phydev);
}
1731 
/* Enable the TJA1103-specific preamble and IPG statistics counters. */
static void tja1103_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);
}
1743 
/* TJA1103 PTP setup: select RX timestamp insertion mode 2 and enable
 * the PTP function block.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1751 
1752 static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
1753 {
1754 	if (enable)
1755 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1756 				   VEND1_PORT_PTP_CONTROL,
1757 				   PORT_PTP_CONTROL_BYPASS);
1758 	else
1759 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1760 				 VEND1_PORT_PTP_CONTROL,
1761 				 PORT_PTP_CONTROL_BYPASS);
1762 }
1763 
/* TJA1103 NMI handler: the FUSA IRQ cannot be masked (see
 * tja1103_config_intr), so acknowledge it and mark the IRQ handled
 * whenever it fired.
 */
static void tja1103_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_ALWAYS_ACCESSIBLE);
	if (ret & FUSA_PASS) {
		/* Writing the bit back acknowledges the event. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      VEND1_ALWAYS_ACCESSIBLE,
			      FUSA_PASS);
		*irq_status = IRQ_HANDLED;
	}
}
1778 
/* TJA1103 register map: PTP/LTC, egress timestamp, external trigger
 * and cable-test register addresses and bit-fields (all in MMD VEND1).
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period	= 0x1104,
	.vend1_event_msg_filt	= 0x1148,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0	= 0x1106,
	.vend1_ltc_wr_nsec_1	= 0x1107,
	.vend1_ltc_wr_sec_0	= 0x1108,
	.vend1_ltc_wr_sec_1	= 0x1109,
	.vend1_ltc_rd_nsec_0	= 0x110A,
	.vend1_ltc_rd_nsec_1	= 0x110B,
	.vend1_ltc_rd_sec_0	= 0x110C,
	.vend1_ltc_rd_sec_1	= 0x110D,
	.vend1_rate_adj_subns_0	= 0x110F,
	.vend1_rate_adj_subns_1	= 0x1110,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1121,
	.vend1_ext_trg_data_1	= 0x1122,
	.vend1_ext_trg_data_2	= 0x1123,
	.vend1_ext_trg_data_3	= 0x1124,
	.vend1_ext_trg_ctrl	= 0x1126,
	.cable_test		= 0x8330,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1831 
/* TJA1103/TJA1104 device description: register map, statistics and the
 * device-specific PTP/IRQ callbacks.
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	.ack_ptp_irq = false,
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.get_extts = nxp_c45_get_extts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
	.nmi_handler = tja1103_nmi_handler,
};
1846 
/* Enable the TJA1120-specific frame counters. MONITOR_RESET is set
 * before enabling — presumably to start from cleared counters; confirm
 * against the datasheet.
 */
static void tja1120_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
			 EXTENDED_CNT_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
			 MONITOR_RESET);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
}
1856 
/* TJA1120 PTP setup: configure RX and external timestamp insertion and
 * enable PTP in the device configuration register.
 */
static void tja1120_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
		      TJA1120_TS_INSRT_MODE);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
			 PTP_ENABLE);
}
1866 
1867 static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
1868 {
1869 	if (enable)
1870 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1871 				 VEND1_PORT_FUNC_ENABLES,
1872 				 PTP_ENABLE);
1873 	else
1874 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1875 				   VEND1_PORT_FUNC_ENABLES,
1876 				   PTP_ENABLE);
1877 }
1878 
/* TJA1120 NMI handler: acknowledge the device-boot-done interrupt and
 * mark the IRQ handled when it fired.
 */
static void tja1120_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   TJA1120_GLOBAL_INFRA_IRQ_STATUS);
	if (ret & TJA1120_DEV_BOOT_DONE) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_GLOBAL_INFRA_IRQ_ACK,
			      TJA1120_DEV_BOOT_DONE);
		*irq_status = IRQ_HANDLED;
	}
}
1893 
1894 static int nxp_c45_macsec_ability(struct phy_device *phydev)
1895 {
1896 	bool macsec_ability;
1897 	int phy_abilities;
1898 
1899 	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1900 				     VEND1_PORT_ABILITIES);
1901 	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
1902 
1903 	return macsec_ability;
1904 }
1905 
1906 static int tja1103_match_phy_device(struct phy_device *phydev)
1907 {
1908 	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
1909 	       !nxp_c45_macsec_ability(phydev);
1910 }
1911 
1912 static int tja1104_match_phy_device(struct phy_device *phydev)
1913 {
1914 	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
1915 	       nxp_c45_macsec_ability(phydev);
1916 }
1917 
1918 static int tja1120_match_phy_device(struct phy_device *phydev)
1919 {
1920 	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
1921 	       !nxp_c45_macsec_ability(phydev);
1922 }
1923 
1924 static int tja1121_match_phy_device(struct phy_device *phydev)
1925 {
1926 	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
1927 	       nxp_c45_macsec_ability(phydev);
1928 }
1929 
/* TJA1120 register map: PTP/LTC, egress timestamp, external trigger
 * and cable-test register addresses and bit-fields (all in MMD VEND1).
 */
static const struct nxp_c45_regmap tja1120_regmap = {
	.vend1_ptp_clk_period	= 0x1020,
	.vend1_event_msg_filt	= 0x9010,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	.vend1_ltc_wr_nsec_0	= 0x1040,
	.vend1_ltc_wr_nsec_1	= 0x1041,
	.vend1_ltc_wr_sec_0	= 0x1042,
	.vend1_ltc_wr_sec_1	= 0x1043,
	.vend1_ltc_rd_nsec_0	= 0x1048,
	.vend1_ltc_rd_nsec_1	= 0x1049,
	.vend1_ltc_rd_sec_0	= 0x104A,
	.vend1_ltc_rd_sec_1	= 0x104B,
	.vend1_rate_adj_subns_0	= 0x1030,
	.vend1_rate_adj_subns_1	= 0x1031,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1071,
	.vend1_ext_trg_data_1	= 0x1072,
	.vend1_ext_trg_data_2	= 0x1073,
	.vend1_ext_trg_data_3	= 0x1074,
	.vend1_ext_trg_ctrl	= 0x1075,
	.cable_test		= 0x8360,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
1982 
/* TJA1120/TJA1121 device description: register map, statistics and the
 * device-specific PTP/IRQ callbacks.
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	.ext_ts_both_edges = true,
	.ack_ptp_irq = true,
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.get_extts = tja1120_get_extts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
	.nmi_handler = tja1120_nmi_handler,
};
1997 
1998 static struct phy_driver nxp_c45_driver[] = {
1999 	{
2000 		.name			= "NXP C45 TJA1103",
2001 		.get_features		= nxp_c45_get_features,
2002 		.driver_data		= &tja1103_phy_data,
2003 		.probe			= nxp_c45_probe,
2004 		.soft_reset		= nxp_c45_soft_reset,
2005 		.config_aneg		= genphy_c45_config_aneg,
2006 		.config_init		= nxp_c45_config_init,
2007 		.config_intr		= tja1103_config_intr,
2008 		.handle_interrupt	= nxp_c45_handle_interrupt,
2009 		.read_status		= genphy_c45_read_status,
2010 		.suspend		= genphy_c45_pma_suspend,
2011 		.resume			= genphy_c45_pma_resume,
2012 		.get_sset_count		= nxp_c45_get_sset_count,
2013 		.get_strings		= nxp_c45_get_strings,
2014 		.get_stats		= nxp_c45_get_stats,
2015 		.cable_test_start	= nxp_c45_cable_test_start,
2016 		.cable_test_get_status	= nxp_c45_cable_test_get_status,
2017 		.set_loopback		= genphy_c45_loopback,
2018 		.get_sqi		= nxp_c45_get_sqi,
2019 		.get_sqi_max		= nxp_c45_get_sqi_max,
2020 		.remove			= nxp_c45_remove,
2021 		.match_phy_device	= tja1103_match_phy_device,
2022 	},
2023 	{
2024 		.name			= "NXP C45 TJA1104",
2025 		.get_features		= nxp_c45_get_features,
2026 		.driver_data		= &tja1103_phy_data,
2027 		.probe			= nxp_c45_probe,
2028 		.soft_reset		= nxp_c45_soft_reset,
2029 		.config_aneg		= genphy_c45_config_aneg,
2030 		.config_init		= nxp_c45_config_init,
2031 		.config_intr		= tja1103_config_intr,
2032 		.handle_interrupt	= nxp_c45_handle_interrupt,
2033 		.read_status		= genphy_c45_read_status,
2034 		.suspend		= genphy_c45_pma_suspend,
2035 		.resume			= genphy_c45_pma_resume,
2036 		.get_sset_count		= nxp_c45_get_sset_count,
2037 		.get_strings		= nxp_c45_get_strings,
2038 		.get_stats		= nxp_c45_get_stats,
2039 		.cable_test_start	= nxp_c45_cable_test_start,
2040 		.cable_test_get_status	= nxp_c45_cable_test_get_status,
2041 		.set_loopback		= genphy_c45_loopback,
2042 		.get_sqi		= nxp_c45_get_sqi,
2043 		.get_sqi_max		= nxp_c45_get_sqi_max,
2044 		.remove			= nxp_c45_remove,
2045 		.match_phy_device	= tja1104_match_phy_device,
2046 	},
2047 	{
2048 		.name			= "NXP C45 TJA1120",
2049 		.get_features		= nxp_c45_get_features,
2050 		.driver_data		= &tja1120_phy_data,
2051 		.probe			= nxp_c45_probe,
2052 		.soft_reset		= nxp_c45_soft_reset,
2053 		.config_aneg		= genphy_c45_config_aneg,
2054 		.config_init		= nxp_c45_config_init,
2055 		.config_intr		= tja1120_config_intr,
2056 		.handle_interrupt	= nxp_c45_handle_interrupt,
2057 		.read_status		= genphy_c45_read_status,
2058 		.link_change_notify	= tja1120_link_change_notify,
2059 		.suspend		= genphy_c45_pma_suspend,
2060 		.resume			= genphy_c45_pma_resume,
2061 		.get_sset_count		= nxp_c45_get_sset_count,
2062 		.get_strings		= nxp_c45_get_strings,
2063 		.get_stats		= nxp_c45_get_stats,
2064 		.cable_test_start	= nxp_c45_cable_test_start,
2065 		.cable_test_get_status	= nxp_c45_cable_test_get_status,
2066 		.set_loopback		= genphy_c45_loopback,
2067 		.get_sqi		= nxp_c45_get_sqi,
2068 		.get_sqi_max		= nxp_c45_get_sqi_max,
2069 		.remove			= nxp_c45_remove,
2070 		.match_phy_device	= tja1120_match_phy_device,
2071 	},
2072 	{
2073 		.name			= "NXP C45 TJA1121",
2074 		.get_features		= nxp_c45_get_features,
2075 		.driver_data		= &tja1120_phy_data,
2076 		.probe			= nxp_c45_probe,
2077 		.soft_reset		= nxp_c45_soft_reset,
2078 		.config_aneg		= genphy_c45_config_aneg,
2079 		.config_init		= nxp_c45_config_init,
2080 		.config_intr		= tja1120_config_intr,
2081 		.handle_interrupt	= nxp_c45_handle_interrupt,
2082 		.read_status		= genphy_c45_read_status,
2083 		.link_change_notify	= tja1120_link_change_notify,
2084 		.suspend		= genphy_c45_pma_suspend,
2085 		.resume			= genphy_c45_pma_resume,
2086 		.get_sset_count		= nxp_c45_get_sset_count,
2087 		.get_strings		= nxp_c45_get_strings,
2088 		.get_stats		= nxp_c45_get_stats,
2089 		.cable_test_start	= nxp_c45_cable_test_start,
2090 		.cable_test_get_status	= nxp_c45_cable_test_get_status,
2091 		.set_loopback		= genphy_c45_loopback,
2092 		.get_sqi		= nxp_c45_get_sqi,
2093 		.get_sqi_max		= nxp_c45_get_sqi_max,
2094 		.remove			= nxp_c45_remove,
2095 		.match_phy_device	= tja1121_match_phy_device,
2096 	},
2097 };
2098 
/* Generate module init/exit that register every entry in nxp_c45_driver[]. */
module_phy_driver(nxp_c45_driver);

/* MDIO device table for module autoloading. Only two IDs are listed because
 * each covers a pair of parts: TJA1103/TJA1104 and TJA1120/TJA1121 share
 * their PHY ID (see the PHY_ID_TJA_* comments at the top of the file).
 * NOTE(review): __maybe_unused presumably silences the unused-variable
 * warning in non-modular builds where MODULE_DEVICE_TABLE() is a no-op.
 */
static const struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");
2112