xref: /linux/drivers/net/phy/nxp-c45-tja11xx.c (revision 4cde72fead4cebb5b6b2fe9425904c2064739184)
1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3  * Copyright (C) 2021 NXP
4  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/phy.h>
14 #include <linux/processor.h>
15 #include <linux/property.h>
16 #include <linux/ptp_classify.h>
17 #include <linux/ptp_clock_kernel.h>
18 #include <linux/net_tstamp.h>
19 
20 #define PHY_ID_TJA_1103			0x001BB010
21 #define PHY_ID_TJA_1120			0x001BB031
22 
23 #define VEND1_DEVICE_CONTROL		0x0040
24 #define DEVICE_CONTROL_RESET		BIT(15)
25 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
26 #define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)
27 
28 #define VEND1_DEVICE_CONFIG		0x0048
29 
30 #define TJA1120_VEND1_EXT_TS_MODE	0x1012
31 
32 #define TJA1120_GLOBAL_INFRA_IRQ_ACK	0x2C08
33 #define TJA1120_GLOBAL_INFRA_IRQ_EN	0x2C0A
34 #define TJA1120_GLOBAL_INFRA_IRQ_STATUS	0x2C0C
35 #define TJA1120_DEV_BOOT_DONE		BIT(1)
36 
37 #define TJA1120_VEND1_PTP_TRIG_DATA_S	0x1070
38 
39 #define TJA1120_EGRESS_TS_DATA_S	0x9060
40 #define TJA1120_EGRESS_TS_END		0x9067
41 #define TJA1120_TS_VALID		BIT(0)
42 #define TJA1120_MORE_TS			BIT(15)
43 
44 #define VEND1_PHY_IRQ_ACK		0x80A0
45 #define VEND1_PHY_IRQ_EN		0x80A1
46 #define VEND1_PHY_IRQ_STATUS		0x80A2
47 #define PHY_IRQ_LINK_EVENT		BIT(1)
48 
49 #define VEND1_ALWAYS_ACCESSIBLE		0x801F
50 #define FUSA_PASS			BIT(4)
51 
52 #define VEND1_PHY_CONTROL		0x8100
53 #define PHY_CONFIG_EN			BIT(14)
54 #define PHY_START_OP			BIT(0)
55 
56 #define VEND1_PHY_CONFIG		0x8108
57 #define PHY_CONFIG_AUTO			BIT(0)
58 
59 #define TJA1120_EPHY_RESETS		0x810A
60 #define EPHY_PCS_RESET			BIT(3)
61 
62 #define VEND1_SIGNAL_QUALITY		0x8320
63 #define SQI_VALID			BIT(14)
64 #define SQI_MASK			GENMASK(2, 0)
65 #define MAX_SQI				SQI_MASK
66 
67 #define CABLE_TEST_ENABLE		BIT(15)
68 #define CABLE_TEST_START		BIT(14)
69 #define CABLE_TEST_OK			0x00
70 #define CABLE_TEST_SHORTED		0x01
71 #define CABLE_TEST_OPEN			0x02
72 #define CABLE_TEST_UNKNOWN		0x07
73 
74 #define VEND1_PORT_CONTROL		0x8040
75 #define PORT_CONTROL_EN			BIT(14)
76 
77 #define VEND1_PORT_ABILITIES		0x8046
78 #define PTP_ABILITY			BIT(3)
79 
80 #define VEND1_PORT_FUNC_IRQ_EN		0x807A
81 #define PTP_IRQS			BIT(3)
82 
83 #define VEND1_PTP_IRQ_ACK		0x9008
84 #define EGR_TS_IRQ			BIT(1)
85 
86 #define VEND1_PORT_INFRA_CONTROL	0xAC00
87 #define PORT_INFRA_CONTROL_EN		BIT(14)
88 
89 #define VEND1_RXID			0xAFCC
90 #define VEND1_TXID			0xAFCD
91 #define ID_ENABLE			BIT(15)
92 
93 #define VEND1_ABILITIES			0xAFC4
94 #define RGMII_ID_ABILITY		BIT(15)
95 #define RGMII_ABILITY			BIT(14)
96 #define RMII_ABILITY			BIT(10)
97 #define REVMII_ABILITY			BIT(9)
98 #define MII_ABILITY			BIT(8)
99 #define SGMII_ABILITY			BIT(0)
100 
101 #define VEND1_MII_BASIC_CONFIG		0xAFC6
102 #define MII_BASIC_CONFIG_REV		BIT(4)
103 #define MII_BASIC_CONFIG_SGMII		0x9
104 #define MII_BASIC_CONFIG_RGMII		0x7
105 #define MII_BASIC_CONFIG_RMII		0x5
106 #define MII_BASIC_CONFIG_MII		0x4
107 
108 #define VEND1_SYMBOL_ERROR_CNT_XTD	0x8351
109 #define EXTENDED_CNT_EN			BIT(15)
110 #define VEND1_MONITOR_STATUS		0xAC80
111 #define MONITOR_RESET			BIT(15)
112 #define VEND1_MONITOR_CONFIG		0xAC86
113 #define LOST_FRAMES_CNT_EN		BIT(9)
114 #define ALL_FRAMES_CNT_EN		BIT(8)
115 
116 #define VEND1_SYMBOL_ERROR_COUNTER	0x8350
117 #define VEND1_LINK_DROP_COUNTER		0x8352
118 #define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
119 #define VEND1_RX_PREAMBLE_COUNT		0xAFCE
120 #define VEND1_TX_PREAMBLE_COUNT		0xAFCF
121 #define VEND1_RX_IPG_LENGTH		0xAFD0
122 #define VEND1_TX_IPG_LENGTH		0xAFD1
123 #define COUNTER_EN			BIT(15)
124 
125 #define VEND1_PTP_CONFIG		0x1102
126 #define EXT_TRG_EDGE			BIT(1)
127 
128 #define TJA1120_SYNC_TRIG_FILTER	0x1010
129 #define PTP_TRIG_RISE_TS		BIT(3)
130 #define PTP_TRIG_FALLING_TS		BIT(2)
131 
132 #define CLK_RATE_ADJ_LD			BIT(15)
133 #define CLK_RATE_ADJ_DIR		BIT(14)
134 
135 #define VEND1_RX_TS_INSRT_CTRL		0x114D
136 #define TJA1103_RX_TS_INSRT_MODE2	0x02
137 
138 #define TJA1120_RX_TS_INSRT_CTRL	0x9012
139 #define TJA1120_RX_TS_INSRT_EN		BIT(15)
140 #define TJA1120_TS_INSRT_MODE		BIT(4)
141 
142 #define VEND1_EGR_RING_DATA_0		0x114E
143 #define VEND1_EGR_RING_CTRL		0x1154
144 
145 #define RING_DATA_0_TS_VALID		BIT(15)
146 
147 #define RING_DONE			BIT(0)
148 
149 #define TS_SEC_MASK			GENMASK(1, 0)
150 
151 #define VEND1_PORT_FUNC_ENABLES		0x8048
152 #define PTP_ENABLE			BIT(3)
153 #define PHY_TEST_ENABLE			BIT(0)
154 
155 #define VEND1_PORT_PTP_CONTROL		0x9000
156 #define PORT_PTP_CONTROL_BYPASS		BIT(11)
157 
158 #define PTP_CLK_PERIOD_100BT1		15ULL
159 #define PTP_CLK_PERIOD_1000BT1		8ULL
160 
161 #define EVENT_MSG_FILT_ALL		0x0F
162 #define EVENT_MSG_FILT_NONE		0x00
163 
164 #define VEND1_GPIO_FUNC_CONFIG_BASE	0x2C40
165 #define GPIO_FUNC_EN			BIT(15)
166 #define GPIO_FUNC_PTP			BIT(6)
167 #define GPIO_SIGNAL_PTP_TRIGGER		0x01
168 #define GPIO_SIGNAL_PPS_OUT		0x12
169 #define GPIO_DISABLE			0
170 #define GPIO_PPS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
171 	GPIO_SIGNAL_PPS_OUT)
172 #define GPIO_EXTTS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
173 	GPIO_SIGNAL_PTP_TRIGGER)
174 
175 #define RGMII_PERIOD_PS			8000U
176 #define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
177 #define MIN_ID_PS			1644U
178 #define MAX_ID_PS			2260U
179 #define DEFAULT_ID_PS			2000U
180 
181 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
182 	(ppb) * (ptp_clk_period), NSEC_PER_SEC)
183 
184 #define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)
185 
186 struct nxp_c45_phy;
187 
/* Private data kept in skb->cb while a packet waits for its hardware
 * timestamp (accessed through the NXP_C45_SKB_CB() helper).
 */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* parsed PTP header inside the skb */
	unsigned int type;		/* packet type as passed to the ts hooks */
};
192 
/* Build a struct nxp_c45_reg_field compound literal describing a bit field
 * of @_size bits at @_offset inside MMD @_devad register @_reg.
 */
#define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size)	\
	((struct nxp_c45_reg_field) {			\
		.reg = _reg,				\
		.devad =  _devad,			\
		.offset = _offset,			\
		.size = _size,				\
	})
200 
/* Location of a bit field inside an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* bit offset of the field's LSB */
	u8 size;	/* field width in bits */
};
207 
/* A hardware timestamp as read from the PHY, together with the PTP header
 * fields used to match it against a queued skb.
 */
struct nxp_c45_hwts {
	u32	nsec;		/* nanoseconds part of the timestamp */
	u32	sec;		/* low seconds bits captured by hardware */
	u8	domain_number;	/* PTP domain of the stamped packet */
	u16	sequence_id;	/* PTP sequence id of the stamped packet */
	u8	msg_type;	/* PTP message type of the stamped packet */
};
215 
/* Per-chip register map: the TJA1103 and TJA1120 expose the same logical
 * PTP machinery at different addresses/bit positions, abstracted here.
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;	/* trigger: latch LTC for reading */
	struct nxp_c45_reg_field ltc_write;	/* trigger: load staged LTC value */
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;	/* egress ts seconds bits [1:0] */
	struct nxp_c45_reg_field sec_4_2;	/* egress ts seconds bits [4:2] */
	struct nxp_c45_reg_field nsec_15_0;
	struct nxp_c45_reg_field nsec_29_16;

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
261 
/* One ethtool statistics entry: display name plus the counter's location. */
struct nxp_c45_phy_stats {
	const char	*name;
	const struct nxp_c45_reg_field counter;
};
266 
/* Chip-specific data and operations, attached to the matched driver entry
 * and retrieved with nxp_c45_get_data().
 */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;
	const struct nxp_c45_phy_stats *stats;
	int n_stats;
	u8 ptp_clk_period;	/* PTP clock period in ns (chip dependent) */
	bool ext_ts_both_edges;	/* hw can timestamp both trigger edges */
	bool ack_ptp_irq;
	void (*counters_enable)(struct phy_device *phydev);
	/* Fetch one egress (TX) timestamp; returns false if none valid. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	/* Fetch one external-trigger timestamp; returns false if none valid. */
	bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
	void (*nmi_handler)(struct phy_device *phydev,
			    irqreturn_t *irq_status);
};
283 
/* Driver-private state, one instance per PHY device. */
struct nxp_c45_phy {
	const struct nxp_c45_phy_data *phy_data;
	struct phy_device *phydev;
	struct mii_timestamper mii_ts;
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info caps;
	struct sk_buff_head tx_queue;	/* skbs awaiting TX timestamps */
	struct sk_buff_head rx_queue;	/* skbs awaiting RX timestamps */
	/* used to access the PTP registers atomic */
	struct mutex ptp_lock;
	int hwts_tx;	/* current HWTSTAMP_TX_* setting */
	int hwts_rx;	/* non-zero when RX timestamping is enabled */
	u32 tx_delay;	/* TX internal delay, picoseconds */
	u32 rx_delay;	/* RX internal delay, picoseconds */
	struct timespec64 extts_ts;	/* last reported ext trigger ts */
	int extts_index;
	bool extts;	/* ext trigger timestamping active */
};
302 
303 static const
304 struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
305 {
306 	return phydev->drv->driver_data;
307 }
308 
309 static const
310 struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
311 {
312 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
313 
314 	return phy_data->regmap;
315 }
316 
317 static int nxp_c45_read_reg_field(struct phy_device *phydev,
318 				  const struct nxp_c45_reg_field *reg_field)
319 {
320 	u16 mask;
321 	int ret;
322 
323 	if (reg_field->size == 0) {
324 		phydev_err(phydev, "Trying to read a reg field of size 0.\n");
325 		return -EINVAL;
326 	}
327 
328 	ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
329 	if (ret < 0)
330 		return ret;
331 
332 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
333 		GENMASK(reg_field->offset + reg_field->size - 1,
334 			reg_field->offset);
335 	ret &= mask;
336 	ret >>= reg_field->offset;
337 
338 	return ret;
339 }
340 
341 static int nxp_c45_write_reg_field(struct phy_device *phydev,
342 				   const struct nxp_c45_reg_field *reg_field,
343 				   u16 val)
344 {
345 	u16 mask;
346 	u16 set;
347 
348 	if (reg_field->size == 0) {
349 		phydev_err(phydev, "Trying to write a reg field of size 0.\n");
350 		return -EINVAL;
351 	}
352 
353 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
354 		GENMASK(reg_field->offset + reg_field->size - 1,
355 			reg_field->offset);
356 	set = val << reg_field->offset;
357 
358 	return phy_modify_mmd_changed(phydev, reg_field->devad,
359 				      reg_field->reg, mask, set);
360 }
361 
/* Set (write 1 to) a single-bit register field. Multi-bit fields are
 * rejected with -EINVAL; used for self-clearing trigger bits as well.
 */
static int nxp_c45_set_reg_field(struct phy_device *phydev,
				 const struct nxp_c45_reg_field *reg_field)
{
	if (reg_field->size != 1) {
		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
		return -EINVAL;
	}

	return nxp_c45_write_reg_field(phydev, reg_field, 1);
}
372 
373 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
374 				   const struct nxp_c45_reg_field *reg_field)
375 {
376 	if (reg_field->size != 1) {
377 		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
378 		return -EINVAL;
379 	}
380 
381 	return nxp_c45_write_reg_field(phydev, reg_field, 0);
382 }
383 
/* True when egress timestamps must be polled from the aux worker because
 * the PHY has no usable interrupt line (irq <= 0).
 */
static bool nxp_c45_poll_txts(struct phy_device *phydev)
{
	return phydev->irq <= 0;
}
388 
389 static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
390 				   struct timespec64 *ts,
391 				   struct ptp_system_timestamp *sts)
392 {
393 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
394 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
395 
396 	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
397 	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
398 				   regmap->vend1_ltc_rd_nsec_0);
399 	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
400 				    regmap->vend1_ltc_rd_nsec_1) << 16;
401 	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
402 				  regmap->vend1_ltc_rd_sec_0);
403 	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
404 				   regmap->vend1_ltc_rd_sec_1) << 16;
405 
406 	return 0;
407 }
408 
409 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
410 				  struct timespec64 *ts,
411 				  struct ptp_system_timestamp *sts)
412 {
413 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
414 
415 	mutex_lock(&priv->ptp_lock);
416 	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
417 	mutex_unlock(&priv->ptp_lock);
418 
419 	return 0;
420 }
421 
/* Program the local time counter (LTC). Caller must hold priv->ptp_lock.
 * The four 16-bit halves are staged first; the ltc_write trigger then
 * commits them to the running counter.
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
		      ts->tv_sec >> 16);
	/* Commit the staged value into the counter. */
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);

	return 0;
}
440 
441 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
442 				 const struct timespec64 *ts)
443 {
444 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
445 
446 	mutex_lock(&priv->ptp_lock);
447 	_nxp_c45_ptp_settime64(ptp, ts);
448 	mutex_unlock(&priv->ptp_lock);
449 
450 	return 0;
451 }
452 
/* Adjust the LTC frequency. The requested offset is converted to a
 * sub-nanosecond increment per PTP clock period; the low half-word is
 * written first, then the high half-word carries the load strobe
 * (CLK_RATE_ADJ_LD) and the direction bit (CLK_RATE_ADJ_DIR, set to
 * speed the clock up).
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
483 
484 static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
485 {
486 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
487 	struct timespec64 now, then;
488 
489 	mutex_lock(&priv->ptp_lock);
490 	then = ns_to_timespec64(delta);
491 	_nxp_c45_ptp_gettimex64(ptp, &now, NULL);
492 	now = timespec64_add(now, then);
493 	_nxp_c45_ptp_settime64(ptp, &now);
494 	mutex_unlock(&priv->ptp_lock);
495 
496 	return 0;
497 }
498 
/* Rebuild a full timestamp from a hardware timestamp that only carries
 * nanoseconds plus the two low seconds bits (TS_SEC_MASK). @ts holds the
 * current LTC time on entry. If the LTC's low seconds bits are behind the
 * hardware's, the 2-bit counter wrapped after the event was stamped, so
 * step @ts back one wrap period before splicing in the hardware bits.
 */
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
508 
509 static bool nxp_c45_match_ts(struct ptp_header *header,
510 			     struct nxp_c45_hwts *hwts,
511 			     unsigned int type)
512 {
513 	return ntohs(header->sequence_id) == hwts->sequence_id &&
514 	       ptp_get_msgtype(header, type) == hwts->msg_type &&
515 	       header->domain_number  == hwts->domain_number;
516 }
517 
/* Read the external trigger timestamp from the four 16-bit data registers
 * and acknowledge it with RING_DONE. Always reports success; validity of
 * the sampled data is checked by the chip-specific callers where the
 * hardware provides a valid flag.
 */
static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       regmap->vend1_ext_trg_data_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_3) << 16;
	/* Release the entry so the next timestamp can be captured. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_ext_trg_ctrl, RING_DONE);

	return true;
}
536 
537 static bool tja1120_extts_is_valid(struct phy_device *phydev)
538 {
539 	bool valid;
540 	int reg;
541 
542 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
543 			   TJA1120_VEND1_PTP_TRIG_DATA_S);
544 	valid = !!(reg & TJA1120_TS_VALID);
545 
546 	return valid;
547 }
548 
/* TJA1120 variant of get_extts: honours the hardware valid flag and works
 * around engineering samples that leave a fresh timestamp stuck in the
 * FIFO instead of the read buffer. Returns true when @extts was filled
 * with a valid timestamp.
 */
static bool tja1120_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   regmap->vend1_ext_trg_ctrl);
	more_ts = !!(reg & TJA1120_MORE_TS);

	valid = tja1120_extts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_extts_out;

		/* Bug workaround for TJA1120 engineering samples: move the new
		 * timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      regmap->vend1_ext_trg_ctrl, RING_DONE);
		valid = tja1120_extts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_extts_out;
	}

	nxp_c45_get_extts(priv, extts);
tja1120_get_extts_out:
	return valid;
}
581 
/* Read one egress timestamp entry into @hwts: the PTP matching fields
 * (domain, message type, sequence id) plus the split nanoseconds and the
 * 5 low seconds bits. Field locations come from the chip regmap.
 */
static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
				   struct nxp_c45_hwts *hwts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;

	hwts->domain_number =
		nxp_c45_read_reg_field(phydev, &regmap->domain_number);
	hwts->msg_type =
		nxp_c45_read_reg_field(phydev, &regmap->msg_type);
	hwts->sequence_id =
		nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
	hwts->nsec =
		nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
	hwts->nsec |=
		nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
	hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
	hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;
}
601 
/* TJA1103: pop the next entry from the egress timestamp ring (RING_DONE),
 * then check its valid flag and read it. Returns true when @hwts holds a
 * valid timestamp. Takes ptp_lock to serialize the ring access.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	nxp_c45_read_egress_ts(priv, hwts);
nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
621 
622 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
623 {
624 	bool valid;
625 	u16 reg;
626 
627 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
628 	valid = !!(reg & TJA1120_TS_VALID);
629 
630 	return valid;
631 }
632 
/* TJA1120 variant of get_egressts: honours the hardware valid flag,
 * applies the engineering-sample FIFO workaround, and clears the valid
 * flag after a successful read. Returns true when @hwts holds a valid
 * timestamp. Takes ptp_lock to serialize the ring access.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	/* Release the buffer so the next timestamp can land. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
665 
/* Pair a hardware egress timestamp with the queued skb it belongs to and
 * complete that skb's TX timestamp. The tx_queue walk is done under the
 * queue lock; the matched skb is unlinked there and completed outside it.
 * Warns if no queued skb matches the timestamp.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* The hardware only stamps partial seconds; expand against
		 * the current LTC time.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
700 
/* PTP aux worker: drains pending TX timestamps (when polling, i.e. no
 * IRQ line), completes queued RX skbs, and reports external trigger
 * events. Returns 1 to reschedule immediately or -1 to idle until the
 * next ptp_schedule_worker() call.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool ts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		ts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!ts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		/* The RX timestamp was inserted by hardware into the PTP
		 * header's reserved2 word: 2 seconds bits + 30 nsec bits.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		/* Only report a new event when the timestamp changed since
		 * the last poll.
		 */
		ts_valid = data->get_extts(priv, &ts);
		if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
752 
753 static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
754 				int pin, u16 pin_cfg)
755 {
756 	struct phy_device *phydev = priv->phydev;
757 
758 	phy_write_mmd(phydev, MDIO_MMD_VEND1,
759 		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
760 }
761 
/* Enable/disable the periodic output (PPS) on the pin assigned to
 * @perout->index. The hardware only generates a fixed 1 Hz pulse aligned
 * to the seconds increment; period must be exactly 1 s and the start time
 * is not configurable. An optional PTP_PEROUT_PHASE of 0 or 500 ms is
 * mapped onto the PPS polarity bit.
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	int pin;

	if (perout->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_enable);
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_polarity);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		/* A 500 ms phase is implemented by inverting the pulse. */
		if (perout->phase.nsec == 0)
			nxp_c45_clear_reg_field(priv->phydev,
						&regmap->pps_polarity);
		else
			nxp_c45_set_reg_field(priv->phydev,
					      &regmap->pps_polarity);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);

	return 0;
}
822 
823 static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
824 					  struct ptp_extts_request *extts)
825 {
826 	if (extts->flags & PTP_RISING_EDGE)
827 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
828 				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);
829 
830 	if (extts->flags & PTP_FALLING_EDGE)
831 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
832 				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
833 }
834 
835 static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
836 					   struct ptp_extts_request *extts)
837 {
838 	/* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
839 	 * this case external ts will be enabled on rising edge.
840 	 */
841 	if (extts->flags & PTP_RISING_EDGE ||
842 	    extts->flags == PTP_ENABLE_FEATURE)
843 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
844 				 TJA1120_SYNC_TRIG_FILTER,
845 				 PTP_TRIG_RISE_TS);
846 	else
847 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
848 				   TJA1120_SYNC_TRIG_FILTER,
849 				   PTP_TRIG_RISE_TS);
850 
851 	if (extts->flags & PTP_FALLING_EDGE)
852 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
853 				 TJA1120_SYNC_TRIG_FILTER,
854 				 PTP_TRIG_FALLING_TS);
855 	else
856 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
857 				   TJA1120_SYNC_TRIG_FILTER,
858 				   PTP_TRIG_FALLING_TS);
859 }
860 
/* Enable/disable external trigger timestamping on the pin assigned to
 * @extts->index. Rejects unsupported flags and both-edge sampling on
 * chips without that capability; kicks the aux worker, which polls for
 * and reports the trigger events.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			      PTP_RISING_EDGE |
			      PTP_FALLING_EDGE |
			      PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE) &&
	    !data->ext_ts_both_edges)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	if (data->ext_ts_both_edges)
		nxp_c45_set_rising_and_falling(priv->phydev, extts);
	else
		nxp_c45_set_rising_or_falling(priv->phydev, extts);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Events are detected by polling from the aux worker. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
902 
903 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
904 			      struct ptp_clock_request *req, int on)
905 {
906 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
907 
908 	switch (req->type) {
909 	case PTP_CLK_REQ_EXTTS:
910 		return nxp_c45_extts_enable(priv, &req->extts, on);
911 	case PTP_CLK_REQ_PEROUT:
912 		return nxp_c45_perout_enable(priv, &req->perout, on);
913 	default:
914 		return -EOPNOTSUPP;
915 	}
916 }
917 
/* The 12 GPIO pins, all unassigned by default; user space picks their
 * function (PEROUT/EXTTS) via the PTP pin API.
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
932 
933 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
934 				  enum ptp_pin_function func, unsigned int chan)
935 {
936 	if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
937 		return -EINVAL;
938 
939 	switch (func) {
940 	case PTP_PF_NONE:
941 	case PTP_PF_PEROUT:
942 	case PTP_PF_EXTTS:
943 		break;
944 	default:
945 		return -EOPNOTSUPP;
946 	}
947 
948 	return 0;
949 }
950 
/* Fill in the PHC capabilities and register the PTP clock. Returns 0 on
 * success or a negative errno; ptp_clock_register() returning NULL
 * (PTP support compiled out) is mapped to -ENOMEM.
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
981 
/* mii_timestamper::txtstamp — queue the skb until its hardware egress
 * timestamp arrives (via IRQ or the polling aux worker); free it when TX
 * timestamping is off.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		/* Without an IRQ line the aux worker must poll for the ts. */
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
1003 
1004 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
1005 			     struct sk_buff *skb, int type)
1006 {
1007 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1008 						mii_ts);
1009 	struct ptp_header *header = ptp_parse_header(skb, type);
1010 
1011 	if (!header)
1012 		return false;
1013 
1014 	if (!priv->hwts_rx)
1015 		return false;
1016 
1017 	NXP_C45_SKB_CB(skb)->header = header;
1018 	skb_queue_tail(&priv->rx_queue, skb);
1019 	ptp_schedule_worker(priv->ptp_clock, 0);
1020 
1021 	return true;
1022 }
1023 
/* mii_timestamper::hwtstamp — apply a SIOCSHWTSTAMP configuration: store
 * the TX mode, map the requested RX filter to on/off (only PTPv2 over L2
 * is supported), program the event message filter, toggle the PTP engine
 * and, when an IRQ line is available, the egress timestamp interrupt.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	const struct nxp_c45_phy_data *data;

	if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	data = nxp_c45_get_data(phydev);
	priv->hwts_tx = cfg->tx_type;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/* All three are widened to the EVENT filter. */
		priv->hwts_rx = 1;
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_ALL);
		data->ptp_enable(phydev, true);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_NONE);
		data->ptp_enable(phydev, false);
	}

	/* In polling mode the egress ts IRQ is never used. */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
	else
		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);

nxp_c45_no_ptp_irq:
	return 0;
}
1076 
1077 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
1078 			   struct ethtool_ts_info *ts_info)
1079 {
1080 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1081 						mii_ts);
1082 
1083 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1084 			SOF_TIMESTAMPING_RX_HARDWARE |
1085 			SOF_TIMESTAMPING_RAW_HARDWARE;
1086 	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1087 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1088 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1089 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1090 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1091 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
1092 
1093 	return 0;
1094 }
1095 
/* ethtool -S counters common to all supported PHYs. Each entry pairs the
 * reported string with the register field that holds the counter.
 */
static const struct nxp_c45_phy_stats common_hw_stats[] = {
	{ "phy_link_status_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
	{ "phy_link_availability_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
	{ "phy_link_loss_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
	{ "phy_link_failure_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
	{ "phy_symbol_error_cnt",
		NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
};
1108 
/* TJA1103-specific ethtool counters, appended after common_hw_stats. */
static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
	{ "rx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
	{ "tx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
	{ "rx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
	{ "tx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
};
1119 
/* TJA1120-specific ethtool counters, appended after common_hw_stats.
 * The *_xtd entries hold the extended (upper) bits of the adjacent
 * 16-bit counters -- see tja1120_counters_enable() for EXTENDED_CNT_EN.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
		NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
		NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
		NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
		NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
		NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1140 
1141 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1142 {
1143 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1144 
1145 	return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1146 }
1147 
1148 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
1149 {
1150 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1151 	size_t count = nxp_c45_get_sset_count(phydev);
1152 	size_t idx;
1153 	size_t i;
1154 
1155 	for (i = 0; i < count; i++) {
1156 		if (i < ARRAY_SIZE(common_hw_stats)) {
1157 			strscpy(data + i * ETH_GSTRING_LEN,
1158 				common_hw_stats[i].name, ETH_GSTRING_LEN);
1159 			continue;
1160 		}
1161 		idx = i - ARRAY_SIZE(common_hw_stats);
1162 		strscpy(data + i * ETH_GSTRING_LEN,
1163 			phy_data->stats[idx].name, ETH_GSTRING_LEN);
1164 	}
1165 }
1166 
1167 static void nxp_c45_get_stats(struct phy_device *phydev,
1168 			      struct ethtool_stats *stats, u64 *data)
1169 {
1170 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1171 	size_t count = nxp_c45_get_sset_count(phydev);
1172 	const struct nxp_c45_reg_field *reg_field;
1173 	size_t idx;
1174 	size_t i;
1175 	int ret;
1176 
1177 	for (i = 0; i < count; i++) {
1178 		if (i < ARRAY_SIZE(common_hw_stats)) {
1179 			reg_field = &common_hw_stats[i].counter;
1180 		} else {
1181 			idx = i - ARRAY_SIZE(common_hw_stats);
1182 			reg_field = &phy_data->stats[idx].counter;
1183 		}
1184 
1185 		ret = nxp_c45_read_reg_field(phydev, reg_field);
1186 		if (ret < 0)
1187 			data[i] = U64_MAX;
1188 		else
1189 			data[i] = ret;
1190 	}
1191 }
1192 
/* Unlock configuration register access at device, port and PHY level.
 * Called from nxp_c45_config_init() before any further VEND1 setup.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	/* Give the device time to apply the global config enable before
	 * touching the port/PHY control registers.
	 */
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
1209 
/* Leave configuration mode and start normal PHY operation. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1215 
1216 static int nxp_c45_config_intr(struct phy_device *phydev)
1217 {
1218 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1219 		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1220 					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1221 	else
1222 		return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1223 					  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1224 }
1225 
1226 static int tja1103_config_intr(struct phy_device *phydev)
1227 {
1228 	int ret;
1229 
1230 	/* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
1231 	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
1232 			    FUSA_PASS);
1233 	if (ret)
1234 		return ret;
1235 
1236 	return nxp_c45_config_intr(phydev);
1237 }
1238 
1239 static int tja1120_config_intr(struct phy_device *phydev)
1240 {
1241 	int ret;
1242 
1243 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1244 		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1245 				       TJA1120_GLOBAL_INFRA_IRQ_EN,
1246 				       TJA1120_DEV_BOOT_DONE);
1247 	else
1248 		ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1249 					 TJA1120_GLOBAL_INFRA_IRQ_EN,
1250 					 TJA1120_DEV_BOOT_DONE);
1251 	if (ret)
1252 		return ret;
1253 
1254 	return nxp_c45_config_intr(phydev);
1255 }
1256 
/* Threaded IRQ handler: services link events, egress timestamp IRQs, and
 * chip-specific non-maskable sources via data->nmi_handler.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		/* Ack the link event before running the phylib state
		 * machine.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp.
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		/* Drain all pending egress timestamps and match them with
		 * the queued TX skbs.
		 */
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	data->nmi_handler(phydev, &ret);

	return ret;
}
1292 
/* Trigger a device reset and wait for the self-clearing reset bit to
 * drop, polling every 20ms for up to 240ms.
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1307 
1308 static int nxp_c45_cable_test_start(struct phy_device *phydev)
1309 {
1310 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1311 
1312 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1313 			 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1314 	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1315 				CABLE_TEST_ENABLE | CABLE_TEST_START);
1316 }
1317 
1318 static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
1319 					 bool *finished)
1320 {
1321 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1322 	int ret;
1323 	u8 cable_test_result;
1324 
1325 	ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
1326 	if (!ret) {
1327 		*finished = false;
1328 		return 0;
1329 	}
1330 
1331 	*finished = true;
1332 	cable_test_result = nxp_c45_read_reg_field(phydev,
1333 						   &regmap->cable_test_result);
1334 
1335 	switch (cable_test_result) {
1336 	case CABLE_TEST_OK:
1337 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1338 					ETHTOOL_A_CABLE_RESULT_CODE_OK);
1339 		break;
1340 	case CABLE_TEST_SHORTED:
1341 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1342 					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
1343 		break;
1344 	case CABLE_TEST_OPEN:
1345 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1346 					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
1347 		break;
1348 	default:
1349 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1350 					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
1351 	}
1352 
1353 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1354 			   CABLE_TEST_ENABLE);
1355 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1356 			   VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1357 
1358 	return nxp_c45_start_op(phydev);
1359 }
1360 
1361 static int nxp_c45_get_sqi(struct phy_device *phydev)
1362 {
1363 	int reg;
1364 
1365 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1366 	if (!(reg & SQI_VALID))
1367 		return -EINVAL;
1368 
1369 	reg &= SQI_MASK;
1370 
1371 	return reg;
1372 }
1373 
static void tja1120_link_change_notify(struct phy_device *phydev)
{
	/* Bug workaround for TJA1120 engineering samples: toggle the PCS
	 * reset on link loss to fix egress timestamps lost after link
	 * recovery.
	 */
	if (phydev->state == PHY_NOLINK) {
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
	}
}
1386 
/* ethtool get_sqi_max callback: the hardware reports SQI in a 3-bit
 * field, so the maximum value equals SQI_MASK.
 */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1391 
1392 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1393 {
1394 	if (delay < MIN_ID_PS) {
1395 		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1396 		return -EINVAL;
1397 	}
1398 
1399 	if (delay > MAX_ID_PS) {
1400 		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1401 		return -EINVAL;
1402 	}
1403 
1404 	return 0;
1405 }
1406 
/* Enable the common link-drop counter, then the chip-specific counters
 * via the per-chip callback.
 */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
1416 
/* Common PTP setup: program the LTC clock period, clear the LTC lock
 * control field (presumably unlocking the local time counter for writes
 * -- see the regmap), then run the chip-specific PTP init.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1428 
/* Convert a delay expressed in degrees into the raw phase register value.
 * The hardware delay in degrees is 73.8 + raw * 0.9, so the inverse is
 * raw = (degrees - 73.8) / 0.9; everything is scaled by 10 to keep one
 * decimal of precision with integer math only.
 * NOTE(review): assumes phase_offset_raw * 10 >= 738, otherwise the u64
 * subtraction wraps -- presumably guaranteed by the MIN_ID_PS bound in
 * nxp_c45_check_delay(); confirm against the MIN_ID_PS definition.
 */
static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
{
	phase_offset_raw *= 10;
	phase_offset_raw -= 738;
	return div_u64(phase_offset_raw, 9);
}
1439 
/* Switch off both TX and RX internal delay stages (plain RGMII mode). */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
1445 
1446 static void nxp_c45_set_delays(struct phy_device *phydev)
1447 {
1448 	struct nxp_c45_phy *priv = phydev->priv;
1449 	u64 tx_delay = priv->tx_delay;
1450 	u64 rx_delay = priv->rx_delay;
1451 	u64 degree;
1452 
1453 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1454 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1455 		degree = div_u64(tx_delay, PS_PER_DEGREE);
1456 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1457 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1458 	} else {
1459 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1460 				   ID_ENABLE);
1461 	}
1462 
1463 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1464 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1465 		degree = div_u64(rx_delay, PS_PER_DEGREE);
1466 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1467 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1468 	} else {
1469 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1470 				   ID_ENABLE);
1471 	}
1472 }
1473 
/* Fetch the RGMII internal delays from device properties, defaulting to
 * DEFAULT_ID_PS when a property is absent, and validate them against the
 * supported range. Only the delays relevant for the current RGMII
 * variant are read.
 */
static int nxp_c45_get_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	int ret;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "tx-internal-delay-ps",
					       &priv->tx_delay);
		if (ret)
			priv->tx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
		if (ret) {
			phydev_err(phydev,
				   "tx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "rx-internal-delay-ps",
					       &priv->rx_delay);
		if (ret)
			priv->rx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
		if (ret) {
			phydev_err(phydev,
				   "rx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	return 0;
}
1513 
/* Validate the requested MAC interface mode against the abilities the
 * device reports and program the MII basic configuration accordingly.
 * Returns -EINVAL when the mode is unsupported.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Plain RGMII: no internal delays. */
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Read and validate the delays before programming them. */
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RMII);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		/* Nothing to configure for an internal connection. */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1586 
/* One-time hardware configuration: unlock config access, apply the
 * SJA1110 rev B workaround, set the MII mode, enable counters and PTP,
 * then start normal operation. The ordering of these steps matters.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* 100/1000BASE-T1 has no autonegotiation in this driver's setup. */
	phydev->autoneg = AUTONEG_DISABLE;

	nxp_c45_counters_enable(phydev);
	nxp_c45_ptp_init(phydev);

	return nxp_c45_start_op(phydev);
}
1617 
/* phylib .get_features callback: advertise TP and MII link modes on top
 * of the abilities read from the C45 PMA registers.
 */
static int nxp_c45_get_features(struct phy_device *phydev)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);

	return genphy_c45_pma_read_abilities(phydev);
}
1625 
1626 static int nxp_c45_probe(struct phy_device *phydev)
1627 {
1628 	struct nxp_c45_phy *priv;
1629 	int ptp_ability;
1630 	int ret = 0;
1631 
1632 	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
1633 	if (!priv)
1634 		return -ENOMEM;
1635 
1636 	skb_queue_head_init(&priv->tx_queue);
1637 	skb_queue_head_init(&priv->rx_queue);
1638 
1639 	priv->phydev = phydev;
1640 
1641 	phydev->priv = priv;
1642 
1643 	mutex_init(&priv->ptp_lock);
1644 
1645 	ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1646 				   VEND1_PORT_ABILITIES);
1647 	ptp_ability = !!(ptp_ability & PTP_ABILITY);
1648 	if (!ptp_ability) {
1649 		phydev_dbg(phydev, "the phy does not support PTP");
1650 		goto no_ptp_support;
1651 	}
1652 
1653 	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
1654 	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
1655 		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
1656 		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
1657 		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
1658 		priv->mii_ts.ts_info = nxp_c45_ts_info;
1659 		phydev->mii_ts = &priv->mii_ts;
1660 		ret = nxp_c45_init_ptp_clock(priv);
1661 	} else {
1662 		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
1663 	}
1664 
1665 no_ptp_support:
1666 
1667 	return ret;
1668 }
1669 
/* phylib .remove callback: tear down the PTP clock (registered only when
 * the PHY supports PTP, see nxp_c45_probe()) and drop any skbs still
 * waiting for a timestamp.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
}
1680 
/* Enable the TJA1103-specific counters listed in tja1103_hw_stats. */
static void tja1103_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);
}
1692 
/* TJA1103 PTP setup: select the RX timestamp insertion mode and enable
 * the PTP port function.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1700 
1701 static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
1702 {
1703 	if (enable)
1704 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1705 				   VEND1_PORT_PTP_CONTROL,
1706 				   PORT_PTP_CONTROL_BYPASS);
1707 	else
1708 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1709 				 VEND1_PORT_PTP_CONTROL,
1710 				 PORT_PTP_CONTROL_BYPASS);
1711 }
1712 
/* TJA1103 non-maskable IRQ source: acknowledge a pending FUSA pass event
 * (the FUSA IRQ cannot be masked on this chip, see tja1103_config_intr())
 * and mark the interrupt handled.
 */
static void tja1103_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_ALWAYS_ACCESSIBLE);
	if (ret & FUSA_PASS) {
		/* Write-to-clear acknowledge of the FUSA pass bit. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      VEND1_ALWAYS_ACCESSIBLE,
			      FUSA_PASS);
		*irq_status = IRQ_HANDLED;
	}
}
1727 
/* Register/field layout for TJA1103. All offsets live in MDIO_MMD_VEND1;
 * NXP_C45_REG_FIELD arguments are presumably (reg, devad, bit offset,
 * width) -- confirm against the macro definition.
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period	= 0x1104,
	.vend1_event_msg_filt	= 0x1148,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0	= 0x1106,
	.vend1_ltc_wr_nsec_1	= 0x1107,
	.vend1_ltc_wr_sec_0	= 0x1108,
	.vend1_ltc_wr_sec_1	= 0x1109,
	.vend1_ltc_rd_nsec_0	= 0x110A,
	.vend1_ltc_rd_nsec_1	= 0x110B,
	.vend1_ltc_rd_sec_0	= 0x110C,
	.vend1_ltc_rd_sec_1	= 0x110D,
	.vend1_rate_adj_subns_0	= 0x110F,
	.vend1_rate_adj_subns_1	= 0x1110,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1121,
	.vend1_ext_trg_data_1	= 0x1122,
	.vend1_ext_trg_data_2	= 0x1123,
	.vend1_ext_trg_data_3	= 0x1124,
	.vend1_ext_trg_ctrl	= 0x1126,
	.cable_test		= 0x8330,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1780 
/* TJA1103 driver data: 100BASE-T1 PTP clock period, self-clearing egress
 * timestamp IRQ (ack_ptp_irq = false), and the chip-specific callbacks.
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	.ack_ptp_irq = false,
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.get_extts = nxp_c45_get_extts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
	.nmi_handler = tja1103_nmi_handler,
};
1795 
/* Enable the TJA1120-specific counters: extended symbol-error counting,
 * a monitor reset, then frame/lost-frame counting.
 */
static void tja1120_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
			 EXTENDED_CNT_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
			 MONITOR_RESET);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
}
1805 
/* TJA1120 PTP setup: enable RX timestamp insertion, select the insertion
 * mode for external timestamps, and enable PTP at device level.
 */
static void tja1120_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
		      TJA1120_TS_INSRT_MODE);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
			 PTP_ENABLE);
}
1815 
1816 static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
1817 {
1818 	if (enable)
1819 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1820 				 VEND1_PORT_FUNC_ENABLES,
1821 				 PTP_ENABLE);
1822 	else
1823 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1824 				   VEND1_PORT_FUNC_ENABLES,
1825 				   PTP_ENABLE);
1826 }
1827 
/* TJA1120 non-maskable IRQ source: acknowledge a pending device-boot-done
 * event (enabled in tja1120_config_intr()) and mark the IRQ handled.
 */
static void tja1120_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   TJA1120_GLOBAL_INFRA_IRQ_STATUS);
	if (ret & TJA1120_DEV_BOOT_DONE) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_GLOBAL_INFRA_IRQ_ACK,
			      TJA1120_DEV_BOOT_DONE);
		*irq_status = IRQ_HANDLED;
	}
}
1842 
/* Register/field layout for TJA1120. Same structure as tja1103_regmap but
 * with the TJA1120 register addresses and bit positions.
 */
static const struct nxp_c45_regmap tja1120_regmap = {
	.vend1_ptp_clk_period	= 0x1020,
	.vend1_event_msg_filt	= 0x9010,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	.vend1_ltc_wr_nsec_0	= 0x1040,
	.vend1_ltc_wr_nsec_1	= 0x1041,
	.vend1_ltc_wr_sec_0	= 0x1042,
	.vend1_ltc_wr_sec_1	= 0x1043,
	.vend1_ltc_rd_nsec_0	= 0x1048,
	.vend1_ltc_rd_nsec_1	= 0x1049,
	.vend1_ltc_rd_sec_0	= 0x104A,
	.vend1_ltc_rd_sec_1	= 0x104B,
	.vend1_rate_adj_subns_0	= 0x1030,
	.vend1_rate_adj_subns_1	= 0x1031,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1071,
	.vend1_ext_trg_data_1	= 0x1072,
	.vend1_ext_trg_data_2	= 0x1073,
	.vend1_ext_trg_data_3	= 0x1074,
	.vend1_ext_trg_ctrl	= 0x1075,
	.cable_test		= 0x8360,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
1895 
/* TJA1120 driver data: 1000BASE-T1 PTP clock period, egress timestamp IRQ
 * that needs an explicit ack (ack_ptp_irq = true), external timestamps on
 * both edges, and the chip-specific callbacks.
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	.ext_ts_both_edges = true,
	.ack_ptp_irq = true,
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.get_extts = tja1120_get_extts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
	.nmi_handler = tja1120_nmi_handler,
};
1910 
/* phylib driver entries. Both models share most callbacks; they differ in
 * driver data, interrupt configuration, and the TJA1120 link-change
 * workaround.
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
		.name			= "NXP C45 TJA1120",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
};
1962 
module_phy_driver(nxp_c45_driver);

/* MDIO device IDs served by this driver, used for module autoloading. */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");
1976