xref: /linux/drivers/net/phy/nxp-c45-tja11xx.c (revision 29e31a8ee811f5d85274f0381f13cd6fe650aea4)
1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3  * Copyright (C) 2021 NXP
4  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/phy.h>
14 #include <linux/processor.h>
15 #include <linux/property.h>
16 #include <linux/ptp_classify.h>
17 #include <linux/ptp_clock_kernel.h>
18 #include <linux/net_tstamp.h>
19 
20 #define PHY_ID_TJA_1103			0x001BB010
21 
22 #define PMAPMD_B100T1_PMAPMD_CTL	0x0834
23 #define B100T1_PMAPMD_CONFIG_EN		BIT(15)
24 #define B100T1_PMAPMD_MASTER		BIT(14)
25 #define MASTER_MODE			(B100T1_PMAPMD_CONFIG_EN | \
26 					 B100T1_PMAPMD_MASTER)
27 #define SLAVE_MODE			(B100T1_PMAPMD_CONFIG_EN)
28 
29 #define VEND1_DEVICE_CONTROL		0x0040
30 #define DEVICE_CONTROL_RESET		BIT(15)
31 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
32 #define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)
33 
34 #define VEND1_PHY_IRQ_ACK		0x80A0
35 #define VEND1_PHY_IRQ_EN		0x80A1
36 #define VEND1_PHY_IRQ_STATUS		0x80A2
37 #define PHY_IRQ_LINK_EVENT		BIT(1)
38 
39 #define VEND1_PHY_CONTROL		0x8100
40 #define PHY_CONFIG_EN			BIT(14)
41 #define PHY_START_OP			BIT(0)
42 
43 #define VEND1_PHY_CONFIG		0x8108
44 #define PHY_CONFIG_AUTO			BIT(0)
45 
46 #define VEND1_SIGNAL_QUALITY		0x8320
47 #define SQI_VALID			BIT(14)
48 #define SQI_MASK			GENMASK(2, 0)
49 #define MAX_SQI				SQI_MASK
50 
51 #define VEND1_CABLE_TEST		0x8330
52 #define CABLE_TEST_ENABLE		BIT(15)
53 #define CABLE_TEST_START		BIT(14)
54 #define CABLE_TEST_VALID		BIT(13)
55 #define CABLE_TEST_OK			0x00
56 #define CABLE_TEST_SHORTED		0x01
57 #define CABLE_TEST_OPEN			0x02
58 #define CABLE_TEST_UNKNOWN		0x07
59 
60 #define VEND1_PORT_CONTROL		0x8040
61 #define PORT_CONTROL_EN			BIT(14)
62 
63 #define VEND1_PORT_ABILITIES		0x8046
64 #define PTP_ABILITY			BIT(3)
65 
66 #define VEND1_PORT_INFRA_CONTROL	0xAC00
67 #define PORT_INFRA_CONTROL_EN		BIT(14)
68 
69 #define VEND1_RXID			0xAFCC
70 #define VEND1_TXID			0xAFCD
71 #define ID_ENABLE			BIT(15)
72 
73 #define VEND1_ABILITIES			0xAFC4
74 #define RGMII_ID_ABILITY		BIT(15)
75 #define RGMII_ABILITY			BIT(14)
76 #define RMII_ABILITY			BIT(10)
77 #define REVMII_ABILITY			BIT(9)
78 #define MII_ABILITY			BIT(8)
79 #define SGMII_ABILITY			BIT(0)
80 
81 #define VEND1_MII_BASIC_CONFIG		0xAFC6
82 #define MII_BASIC_CONFIG_REV		BIT(4)
83 #define MII_BASIC_CONFIG_SGMII		0x9
84 #define MII_BASIC_CONFIG_RGMII		0x7
85 #define MII_BASIC_CONFIG_RMII		0x5
86 #define MII_BASIC_CONFIG_MII		0x4
87 
88 #define VEND1_SYMBOL_ERROR_COUNTER	0x8350
89 #define VEND1_LINK_DROP_COUNTER		0x8352
90 #define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
91 #define VEND1_R_GOOD_FRAME_CNT		0xA950
92 #define VEND1_R_BAD_FRAME_CNT		0xA952
93 #define VEND1_R_RXER_FRAME_CNT		0xA954
94 #define VEND1_RX_PREAMBLE_COUNT		0xAFCE
95 #define VEND1_TX_PREAMBLE_COUNT		0xAFCF
96 #define VEND1_RX_IPG_LENGTH		0xAFD0
97 #define VEND1_TX_IPG_LENGTH		0xAFD1
98 #define COUNTER_EN			BIT(15)
99 
100 #define VEND1_PTP_CONFIG		0x1102
101 #define EXT_TRG_EDGE			BIT(1)
102 #define PPS_OUT_POL			BIT(2)
103 #define PPS_OUT_EN			BIT(3)
104 
105 #define VEND1_LTC_LOAD_CTRL		0x1105
106 #define READ_LTC			BIT(2)
107 #define LOAD_LTC			BIT(0)
108 
109 #define VEND1_LTC_WR_NSEC_0		0x1106
110 #define VEND1_LTC_WR_NSEC_1		0x1107
111 #define VEND1_LTC_WR_SEC_0		0x1108
112 #define VEND1_LTC_WR_SEC_1		0x1109
113 
114 #define VEND1_LTC_RD_NSEC_0		0x110A
115 #define VEND1_LTC_RD_NSEC_1		0x110B
116 #define VEND1_LTC_RD_SEC_0		0x110C
117 #define VEND1_LTC_RD_SEC_1		0x110D
118 
119 #define VEND1_RATE_ADJ_SUBNS_0		0x110F
120 #define VEND1_RATE_ADJ_SUBNS_1		0x1110
121 #define CLK_RATE_ADJ_LD			BIT(15)
122 #define CLK_RATE_ADJ_DIR		BIT(14)
123 
124 #define VEND1_HW_LTC_LOCK_CTRL		0x1115
125 #define HW_LTC_LOCK_EN			BIT(0)
126 
127 #define VEND1_PTP_IRQ_EN		0x1131
128 #define VEND1_PTP_IRQ_STATUS		0x1132
129 #define PTP_IRQ_EGR_TS			BIT(0)
130 
131 #define VEND1_RX_TS_INSRT_CTRL		0x114D
132 #define RX_TS_INSRT_MODE2		0x02
133 
134 #define VEND1_EGR_RING_DATA_0		0x114E
135 #define VEND1_EGR_RING_DATA_1_SEQ_ID	0x114F
136 #define VEND1_EGR_RING_DATA_2_NSEC_15_0	0x1150
137 #define VEND1_EGR_RING_DATA_3		0x1151
138 #define VEND1_EGR_RING_CTRL		0x1154
139 
140 #define VEND1_EXT_TRG_TS_DATA_0		0x1121
141 #define VEND1_EXT_TRG_TS_DATA_1		0x1122
142 #define VEND1_EXT_TRG_TS_DATA_2		0x1123
143 #define VEND1_EXT_TRG_TS_DATA_3		0x1124
144 #define VEND1_EXT_TRG_TS_DATA_4		0x1125
145 #define VEND1_EXT_TRG_TS_CTRL		0x1126
146 
147 #define RING_DATA_0_DOMAIN_NUMBER	GENMASK(7, 0)
148 #define RING_DATA_0_MSG_TYPE		GENMASK(11, 8)
149 #define RING_DATA_0_SEC_4_2		GENMASK(14, 2)
150 #define RING_DATA_0_TS_VALID		BIT(15)
151 
152 #define RING_DATA_3_NSEC_29_16		GENMASK(13, 0)
153 #define RING_DATA_3_SEC_1_0		GENMASK(15, 14)
154 #define RING_DATA_5_SEC_16_5		GENMASK(15, 4)
155 #define RING_DONE			BIT(0)
156 
157 #define TS_SEC_MASK			GENMASK(1, 0)
158 
159 #define VEND1_PORT_FUNC_ENABLES		0x8048
160 #define PTP_ENABLE			BIT(3)
161 
162 #define VEND1_PORT_PTP_CONTROL		0x9000
163 #define PORT_PTP_CONTROL_BYPASS		BIT(11)
164 
165 #define VEND1_PTP_CLK_PERIOD		0x1104
166 #define PTP_CLK_PERIOD_100BT1		15ULL
167 
168 #define VEND1_EVENT_MSG_FILT		0x1148
169 #define EVENT_MSG_FILT_ALL		0x0F
170 #define EVENT_MSG_FILT_NONE		0x00
171 
172 #define VEND1_TX_PIPE_DLY_NS		0x1149
173 #define VEND1_TX_PIPEDLY_SUBNS		0x114A
174 #define VEND1_RX_PIPE_DLY_NS		0x114B
175 #define VEND1_RX_PIPEDLY_SUBNS		0x114C
176 
177 #define VEND1_GPIO_FUNC_CONFIG_BASE	0x2C40
178 #define GPIO_FUNC_EN			BIT(15)
179 #define GPIO_FUNC_PTP			BIT(6)
180 #define GPIO_SIGNAL_PTP_TRIGGER		0x01
181 #define GPIO_SIGNAL_PPS_OUT		0x12
182 #define GPIO_DISABLE			0
183 #define GPIO_PPS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
184 	GPIO_SIGNAL_PPS_OUT)
185 #define GPIO_EXTTS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
186 	GPIO_SIGNAL_PTP_TRIGGER)
187 
188 #define RGMII_PERIOD_PS			8000U
189 #define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
190 #define MIN_ID_PS			1644U
191 #define MAX_ID_PS			2260U
192 #define DEFAULT_ID_PS			2000U
193 
194 #define PPM_TO_SUBNS_INC(ppb)	div_u64(GENMASK_ULL(31, 0) * (ppb) * \
195 					PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
196 
197 #define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)
198 
199 struct nxp_c45_skb_cb {
200 	struct ptp_header *header;
201 	unsigned int type;
202 };
203 
204 struct nxp_c45_hwts {
205 	u32	nsec;
206 	u32	sec;
207 	u8	domain_number;
208 	u16	sequence_id;
209 	u8	msg_type;
210 };
211 
212 struct nxp_c45_phy {
213 	struct phy_device *phydev;
214 	struct mii_timestamper mii_ts;
215 	struct ptp_clock *ptp_clock;
216 	struct ptp_clock_info caps;
217 	struct sk_buff_head tx_queue;
218 	struct sk_buff_head rx_queue;
219 	/* used to access the PTP registers atomic */
220 	struct mutex ptp_lock;
221 	int hwts_tx;
222 	int hwts_rx;
223 	u32 tx_delay;
224 	u32 rx_delay;
225 	struct timespec64 extts_ts;
226 	int extts_index;
227 	bool extts;
228 };
229 
230 struct nxp_c45_phy_stats {
231 	const char	*name;
232 	u8		mmd;
233 	u16		reg;
234 	u8		off;
235 	u16		mask;
236 };
237 
238 static bool nxp_c45_poll_txts(struct phy_device *phydev)
239 {
240 	return phydev->irq <= 0;
241 }
242 
/* Read the PHY's local time counter (LTC).
 *
 * Writing READ_LTC latches the running counter into the RD registers,
 * which are then read back in four 16-bit chunks (nsec low/high,
 * sec low/high); only 32 bits of seconds are exposed this way.
 * Caller must hold priv->ptp_lock. Always returns 0.
 * NOTE(review): @sts is accepted but unused, so no PHC system
 * timestamps are captured around the register reads.
 */
static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	/* Latch the running LTC into the RD_* shadow registers. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
		      READ_LTC);
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_LTC_RD_NSEC_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    VEND1_LTC_RD_NSEC_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  VEND1_LTC_RD_SEC_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_LTC_RD_SEC_1) << 16;

	return 0;
}
262 
263 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
264 				  struct timespec64 *ts,
265 				  struct ptp_system_timestamp *sts)
266 {
267 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
268 
269 	mutex_lock(&priv->ptp_lock);
270 	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
271 	mutex_unlock(&priv->ptp_lock);
272 
273 	return 0;
274 }
275 
/* Set the PHY's local time counter (LTC).
 *
 * The new time is staged in the four 16-bit WR_* registers and then
 * committed atomically by writing LOAD_LTC; the commit must come last.
 * Caller must hold priv->ptp_lock. Always returns 0.
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_1,
		      ts->tv_sec >> 16);
	/* Commit the staged value into the running counter. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
		      LOAD_LTC);

	return 0;
}
294 
295 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
296 				 const struct timespec64 *ts)
297 {
298 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
299 
300 	mutex_lock(&priv->ptp_lock);
301 	_nxp_c45_ptp_settime64(ptp, ts);
302 	mutex_unlock(&priv->ptp_lock);
303 
304 	return 0;
305 }
306 
/* Adjust the LTC frequency.
 *
 * The requested scaled_ppm is converted to a sub-nanosecond increment
 * per clock period (PPM_TO_SUBNS_INC). The low 16 bits go to SUBNS_0;
 * the remaining bits, together with the load bit (CLK_RATE_ADJ_LD) and
 * the direction bit (CLK_RATE_ADJ_DIR, set for a positive adjustment),
 * go to SUBNS_1 — that second write commits the whole value, so it must
 * come last. Always returns 0.
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
333 
334 static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
335 {
336 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
337 	struct timespec64 now, then;
338 
339 	mutex_lock(&priv->ptp_lock);
340 	then = ns_to_timespec64(delta);
341 	_nxp_c45_ptp_gettimex64(ptp, &now, NULL);
342 	now = timespec64_add(now, then);
343 	_nxp_c45_ptp_settime64(ptp, &now);
344 	mutex_unlock(&priv->ptp_lock);
345 
346 	return 0;
347 }
348 
/* Rebuild a full timestamp from the partial seconds in a HW timestamp.
 *
 * The hardware only reports the 2 LSBs of the seconds counter
 * (TS_SEC_MASK). @ts holds the current LTC time; splice the hardware's
 * low seconds bits into it. If the LTC's low bits are already smaller
 * than the hardware's, the counter wrapped between the event and the
 * read, so step back one wrap interval (TS_SEC_MASK + 1 seconds) first.
 */
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
358 
359 static bool nxp_c45_match_ts(struct ptp_header *header,
360 			     struct nxp_c45_hwts *hwts,
361 			     unsigned int type)
362 {
363 	return ntohs(header->sequence_id) == hwts->sequence_id &&
364 	       ptp_get_msgtype(header, type) == hwts->msg_type &&
365 	       header->domain_number  == hwts->domain_number;
366 }
367 
/* Read the latest external trigger timestamp from the PHY.
 *
 * Nanoseconds and (32-bit) seconds are assembled from four 16-bit data
 * registers; writing RING_DONE afterwards acknowledges the entry so the
 * hardware can present the next one.
 */
static void nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      VEND1_EXT_TRG_TS_DATA_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       VEND1_EXT_TRG_TS_DATA_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     VEND1_EXT_TRG_TS_DATA_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      VEND1_EXT_TRG_TS_DATA_3) << 16;
	/* Release the entry so the next trigger can be captured. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EXT_TRG_TS_CTRL,
		      RING_DONE);
}
382 
/* Pop one egress timestamp from the EGR TS FIFO.
 *
 * Writing RING_DONE advances the FIFO to the next entry; DATA_0 then
 * carries the valid flag plus the domain number, message type and part
 * of the seconds. Returns true and fills @hwts when a valid entry was
 * read, false when the FIFO is empty. Serialized by priv->ptp_lock.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	/* Low byte of DATA_0 is the PTP domain number. */
	hwts->domain_number = reg;
	hwts->msg_type = (reg & RING_DATA_0_MSG_TYPE) >> 8;
	hwts->sec = (reg & RING_DATA_0_SEC_4_2) >> 10;
	hwts->sequence_id = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
					 VEND1_EGR_RING_DATA_1_SEQ_ID);
	hwts->nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  VEND1_EGR_RING_DATA_2_NSEC_15_0);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_3);
	hwts->nsec |= (reg & RING_DATA_3_NSEC_29_16) << 16;
	hwts->sec |= (reg & RING_DATA_3_SEC_1_0) >> 14;

nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
412 
/* Deliver a hardware egress timestamp to the skb it belongs to.
 *
 * Walks the TX queue under its spinlock looking for an skb whose PTP
 * header matches @txts (sequence id / message type / domain), unlinks
 * it, then — outside the lock — reconstructs the full time from the
 * current LTC and completes the skb's TX timestamp. A timestamp with
 * no matching skb is only logged.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* Full seconds come from the LTC; the FIFO entry only
		 * holds the low bits.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
447 
/* PTP clock worker. Three duties:
 *  1. in polling mode (no PHY IRQ), drain the egress TS FIFO and match
 *     the entries against queued TX skbs;
 *  2. attach reconstructed RX timestamps (seconds from the LTC, partial
 *     time stashed by the PHY in header->reserved2) and pass the skbs
 *     up the stack;
 *  3. when extts is armed, poll the external trigger timestamp and emit
 *     a PTP_CLOCK_EXTTS event whenever it changes.
 * Return value is the delay until the next invocation; a negative value
 * stops rescheduling (see struct ptp_clock_info::do_aux_work).
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool txts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		txts_valid = nxp_c45_get_hwtxts(priv, &hwts);
		if (unlikely(!txts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		/* PHY stores 2 bits of seconds + 30 bits of nanoseconds
		 * in the header's reserved2 field (RX_TS_INSRT_MODE2).
		 */
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		nxp_c45_get_extts(priv, &ts);
		/* Only report a new trigger when the timestamp moved. */
		if (timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
498 
499 static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
500 				int pin, u16 pin_cfg)
501 {
502 	struct phy_device *phydev = priv->phydev;
503 
504 	phy_write_mmd(phydev, MDIO_MMD_VEND1,
505 		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
506 }
507 
/* Enable/disable the periodic output (PPS) on the requested pin.
 *
 * The hardware only generates a fixed 1 Hz pulse aligned to the seconds
 * counter, so the period must be exactly 1 s and the start time 0;
 * the only supported phase values are 0 and 500 ms (selected via the
 * output polarity bit). Returns 0 on success or a negative errno.
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	struct phy_device *phydev = priv->phydev;
	int pin;

	/* Only the PHASE flag is supported. */
	if (perout->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
				   PPS_OUT_EN);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
				   PPS_OUT_POL);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		/* A 500 ms phase is implemented by inverting the output
		 * polarity.
		 */
		if (perout->phase.nsec == 0)
			phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					   VEND1_PTP_CONFIG, PPS_OUT_POL);
		else
			phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					 VEND1_PTP_CONFIG, PPS_OUT_POL);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, PPS_OUT_EN);

	return 0;
}
567 
/* Enable/disable external timestamping on the requested pin.
 *
 * Sampling on a single edge only (rising or falling, not both). The
 * timestamps are not interrupt driven: enabling arms priv->extts and
 * kicks the PTP worker, which polls the trigger registers.
 * Returns 0 on success or a negative errno.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			      PTP_RISING_EDGE |
			      PTP_FALLING_EDGE |
			      PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE))
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	/* EXT_TRG_EDGE clear = rising edge, set = falling edge. */
	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Start polling the trigger timestamp right away. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
610 
611 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
612 			      struct ptp_clock_request *req, int on)
613 {
614 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
615 
616 	switch (req->type) {
617 	case PTP_CLK_REQ_EXTTS:
618 		return nxp_c45_extts_enable(priv, &req->extts, on);
619 	case PTP_CLK_REQ_PEROUT:
620 		return nxp_c45_perout_enable(priv, &req->perout, on);
621 	default:
622 		return -EOPNOTSUPP;
623 	}
624 }
625 
/* The 12 PHY GPIO pins exposed through the PTP pin interface; all start
 * with no function assigned, user space selects PEROUT/EXTTS per pin.
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
640 
641 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
642 				  enum ptp_pin_function func, unsigned int chan)
643 {
644 	if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
645 		return -EINVAL;
646 
647 	switch (func) {
648 	case PTP_PF_NONE:
649 	case PTP_PF_PEROUT:
650 	case PTP_PF_EXTTS:
651 		break;
652 	default:
653 		return -EOPNOTSUPP;
654 	}
655 
656 	return 0;
657 }
658 
/* Fill in the PHC capabilities and register the PTP clock.
 *
 * Returns 0 on success or a negative errno. ptp_clock_register() can
 * also return NULL (when PTP clock support is not available); map that
 * to -ENOMEM so probing fails cleanly.
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
689 
/* mii_timestamper::txtstamp callback.
 *
 * With TX timestamping on, stash the parsed PTP header/type in the skb
 * control block, queue the clone for later completion and — in polling
 * mode — kick the PTP worker to drain the egress TS FIFO. Otherwise
 * the clone is dropped. The driver takes ownership of @skb here.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
711 
712 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
713 			     struct sk_buff *skb, int type)
714 {
715 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
716 						mii_ts);
717 	struct ptp_header *header = ptp_parse_header(skb, type);
718 
719 	if (!header)
720 		return false;
721 
722 	if (!priv->hwts_rx)
723 		return false;
724 
725 	NXP_C45_SKB_CB(skb)->header = header;
726 	skb_queue_tail(&priv->rx_queue, skb);
727 	ptp_schedule_worker(priv->ptp_clock, 0);
728 
729 	return true;
730 }
731 
/* SIOCSHWTSTAMP handler (mii_timestamper::hwtstamp).
 *
 * Validates the user's hwtstamp_config, records the TX mode and RX
 * filter, and programs the PHY accordingly: with timestamping active,
 * event-message filtering is enabled and the PTP bypass is cleared;
 * otherwise filtering is off and the PTP block is bypassed. With a PHY
 * IRQ available, the egress-TS interrupt tracks the TX mode; in polling
 * mode no IRQ programming is done. Writes the (possibly adjusted)
 * config back to user space.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct ifreq *ifreq)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	priv->hwts_tx = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		priv->hwts_rx = 1;
		/* All L2 event messages are timestamped; report the
		 * broader filter actually applied.
		 */
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_ALL);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_NONE);
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_PTP_CONTROL,
				 PORT_PTP_CONTROL_BYPASS);
	}

	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);

nxp_c45_no_ptp_irq:
	return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
788 
789 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
790 			   struct ethtool_ts_info *ts_info)
791 {
792 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
793 						mii_ts);
794 
795 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
796 			SOF_TIMESTAMPING_RX_HARDWARE |
797 			SOF_TIMESTAMPING_RAW_HARDWARE;
798 	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
799 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
800 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
801 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
802 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
803 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
804 
805 	return 0;
806 }
807 
/* Vendor statistics exported via ethtool -S: ethtool name, MMD, register
 * address, bit offset of the field and its mask within the register.
 */
static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = {
	{ "phy_symbol_error_cnt", MDIO_MMD_VEND1,
		VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) },
	{ "phy_link_status_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 8, GENMASK(13, 8) },
	{ "phy_link_availability_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 0, GENMASK(5, 0) },
	{ "phy_link_loss_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 10, GENMASK(15, 10) },
	{ "phy_link_failure_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 0, GENMASK(9, 0) },
	{ "r_good_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_GOOD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_bad_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_BAD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_rxer_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_RXER_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "rx_preamble_count", MDIO_MMD_VEND1,
		VEND1_RX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "tx_preamble_count", MDIO_MMD_VEND1,
		VEND1_TX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "rx_ipg_length", MDIO_MMD_VEND1,
		VEND1_RX_IPG_LENGTH, 0, GENMASK(8, 0) },
	{ "tx_ipg_length", MDIO_MMD_VEND1,
		VEND1_TX_IPG_LENGTH, 0, GENMASK(8, 0) },
};
834 
/* Number of ethtool statistics strings/values this driver exposes. */
static int nxp_c45_get_sset_count(struct phy_device *phydev)
{
	return ARRAY_SIZE(nxp_c45_hw_stats);
}
839 
840 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
841 {
842 	size_t i;
843 
844 	for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
845 		strncpy(data + i * ETH_GSTRING_LEN,
846 			nxp_c45_hw_stats[i].name, ETH_GSTRING_LEN);
847 	}
848 }
849 
850 static void nxp_c45_get_stats(struct phy_device *phydev,
851 			      struct ethtool_stats *stats, u64 *data)
852 {
853 	size_t i;
854 	int ret;
855 
856 	for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
857 		ret = phy_read_mmd(phydev, nxp_c45_hw_stats[i].mmd,
858 				   nxp_c45_hw_stats[i].reg);
859 		if (ret < 0) {
860 			data[i] = U64_MAX;
861 		} else {
862 			data[i] = ret & nxp_c45_hw_stats[i].mask;
863 			data[i] >>= nxp_c45_hw_stats[i].off;
864 		}
865 	}
866 }
867 
/* Enable device configuration and the port/PHY/infrastructure blocks.
 *
 * Global config access must be enabled first; the short delay lets the
 * device-control write take effect before the per-block enables.
 * Always returns 0 (the MDIO write results are not checked).
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
884 
/* Leave configuration mode and start normal PHY operation. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
890 
891 static int nxp_c45_config_intr(struct phy_device *phydev)
892 {
893 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
894 		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
895 					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
896 	else
897 		return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
898 					  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
899 }
900 
/* Threaded PHY interrupt handler.
 *
 * Handles two sources: link events (acked explicitly, then the PHY
 * state machine is kicked) and egress timestamps (the FIFO is drained,
 * which deasserts the line — no explicit ack needed).
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	/* There is no need for ACK.
	 * The irq signal will be asserted until the EGR TS FIFO will be
	 * emptied.
	 */
	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_IRQ_STATUS);
	if (irq & PTP_IRQ_EGR_TS) {
		while (nxp_c45_get_hwtxts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	return ret;
}
930 
/* Soft-reset the device and wait for the self-clearing reset bit.
 *
 * Polls VEND1_DEVICE_CONTROL every ~20 ms, timing out after 240 ms.
 * Returns 0 on success or a negative errno (write failure or
 * -ETIMEDOUT from the poll helper).
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
945 
/* Kick off a cable diagnostic; results are fetched later via
 * nxp_c45_cable_test_get_status().
 */
static int nxp_c45_cable_test_start(struct phy_device *phydev)
{
	return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
			     CABLE_TEST_ENABLE | CABLE_TEST_START);
}
951 
952 static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
953 					 bool *finished)
954 {
955 	int ret;
956 	u8 cable_test_result;
957 
958 	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST);
959 	if (!(ret & CABLE_TEST_VALID)) {
960 		*finished = false;
961 		return 0;
962 	}
963 
964 	*finished = true;
965 	cable_test_result = ret & GENMASK(2, 0);
966 
967 	switch (cable_test_result) {
968 	case CABLE_TEST_OK:
969 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
970 					ETHTOOL_A_CABLE_RESULT_CODE_OK);
971 		break;
972 	case CABLE_TEST_SHORTED:
973 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
974 					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
975 		break;
976 	case CABLE_TEST_OPEN:
977 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
978 					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
979 		break;
980 	default:
981 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
982 					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
983 	}
984 
985 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
986 			   CABLE_TEST_ENABLE);
987 
988 	return nxp_c45_start_op(phydev);
989 }
990 
/* Program the 100BASE-T1 master/slave role from the requested config.
 *
 * The hardware only supports a forced role, so "preferred" requests are
 * treated like "force". UNKNOWN/UNSUPPORTED leaves the register alone.
 * Returns 0 or -EOPNOTSUPP for an unrecognized mode.
 */
static int nxp_c45_setup_master_slave(struct phy_device *phydev)
{
	switch (phydev->master_slave_set) {
	case MASTER_SLAVE_CFG_MASTER_FORCE:
	case MASTER_SLAVE_CFG_MASTER_PREFERRED:
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
			      MASTER_MODE);
		break;
	case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
	case MASTER_SLAVE_CFG_SLAVE_FORCE:
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
			      SLAVE_MODE);
		break;
	case MASTER_SLAVE_CFG_UNKNOWN:
	case MASTER_SLAVE_CFG_UNSUPPORTED:
		return 0;
	default:
		phydev_warn(phydev, "Unsupported Master/Slave mode\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
1014 
1015 static int nxp_c45_read_master_slave(struct phy_device *phydev)
1016 {
1017 	int reg;
1018 
1019 	phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
1020 	phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
1021 
1022 	reg = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL);
1023 	if (reg < 0)
1024 		return reg;
1025 
1026 	if (reg & B100T1_PMAPMD_MASTER) {
1027 		phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
1028 		phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
1029 	} else {
1030 		phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
1031 		phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
1032 	}
1033 
1034 	return 0;
1035 }
1036 
/* .config_aneg callback. 100BASE-T1 link parameters are fixed, so the
 * only configurable aspect is the master/slave role.
 */
static int nxp_c45_config_aneg(struct phy_device *phydev)
{
	return nxp_c45_setup_master_slave(phydev);
}
1041 
/* .read_status callback: generic C45 link status, then the
 * driver-specific master/slave role. Returns 0 or a negative errno.
 */
static int nxp_c45_read_status(struct phy_device *phydev)
{
	int ret;

	ret = genphy_c45_read_status(phydev);
	if (ret)
		return ret;

	return nxp_c45_read_master_slave(phydev);
}
1056 
1057 static int nxp_c45_get_sqi(struct phy_device *phydev)
1058 {
1059 	int reg;
1060 
1061 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1062 	if (!(reg & SQI_VALID))
1063 		return -EINVAL;
1064 
1065 	reg &= SQI_MASK;
1066 
1067 	return reg;
1068 }
1069 
/* Report the highest SQI value the hardware can indicate (3-bit field). */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1074 
1075 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1076 {
1077 	if (delay < MIN_ID_PS) {
1078 		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1079 		return -EINVAL;
1080 	}
1081 
1082 	if (delay > MAX_ID_PS) {
1083 		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1084 		return -EINVAL;
1085 	}
1086 
1087 	return 0;
1088 }
1089 
1090 static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1091 {
1092 	/* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1093 	 * To avoid floating point operations we'll multiply by 10
1094 	 * and get 1 decimal point precision.
1095 	 */
1096 	phase_offset_raw *= 10;
1097 	phase_offset_raw -= 738;
1098 	return div_u64(phase_offset_raw, 9);
1099 }
1100 
/* Turn off both the TX and RX RGMII internal delay lines. */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
1106 
1107 static void nxp_c45_set_delays(struct phy_device *phydev)
1108 {
1109 	struct nxp_c45_phy *priv = phydev->priv;
1110 	u64 tx_delay = priv->tx_delay;
1111 	u64 rx_delay = priv->rx_delay;
1112 	u64 degree;
1113 
1114 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1115 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1116 		degree = div_u64(tx_delay, PS_PER_DEGREE);
1117 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1118 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1119 	} else {
1120 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1121 				   ID_ENABLE);
1122 	}
1123 
1124 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1125 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1126 		degree = div_u64(rx_delay, PS_PER_DEGREE);
1127 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1128 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1129 	} else {
1130 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1131 				   ID_ENABLE);
1132 	}
1133 }
1134 
1135 static int nxp_c45_get_delays(struct phy_device *phydev)
1136 {
1137 	struct nxp_c45_phy *priv = phydev->priv;
1138 	int ret;
1139 
1140 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1141 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1142 		ret = device_property_read_u32(&phydev->mdio.dev,
1143 					       "tx-internal-delay-ps",
1144 					       &priv->tx_delay);
1145 		if (ret)
1146 			priv->tx_delay = DEFAULT_ID_PS;
1147 
1148 		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1149 		if (ret) {
1150 			phydev_err(phydev,
1151 				   "tx-internal-delay-ps invalid value\n");
1152 			return ret;
1153 		}
1154 	}
1155 
1156 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1157 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1158 		ret = device_property_read_u32(&phydev->mdio.dev,
1159 					       "rx-internal-delay-ps",
1160 					       &priv->rx_delay);
1161 		if (ret)
1162 			priv->rx_delay = DEFAULT_ID_PS;
1163 
1164 		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1165 		if (ret) {
1166 			phydev_err(phydev,
1167 				   "rx-internal-delay-ps invalid value\n");
1168 			return ret;
1169 		}
1170 	}
1171 
1172 	return 0;
1173 }
1174 
1175 static int nxp_c45_set_phy_mode(struct phy_device *phydev)
1176 {
1177 	int ret;
1178 
1179 	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
1180 	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);
1181 
1182 	switch (phydev->interface) {
1183 	case PHY_INTERFACE_MODE_RGMII:
1184 		if (!(ret & RGMII_ABILITY)) {
1185 			phydev_err(phydev, "rgmii mode not supported\n");
1186 			return -EINVAL;
1187 		}
1188 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1189 			      MII_BASIC_CONFIG_RGMII);
1190 		nxp_c45_disable_delays(phydev);
1191 		break;
1192 	case PHY_INTERFACE_MODE_RGMII_ID:
1193 	case PHY_INTERFACE_MODE_RGMII_TXID:
1194 	case PHY_INTERFACE_MODE_RGMII_RXID:
1195 		if (!(ret & RGMII_ID_ABILITY)) {
1196 			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
1197 			return -EINVAL;
1198 		}
1199 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1200 			      MII_BASIC_CONFIG_RGMII);
1201 		ret = nxp_c45_get_delays(phydev);
1202 		if (ret)
1203 			return ret;
1204 
1205 		nxp_c45_set_delays(phydev);
1206 		break;
1207 	case PHY_INTERFACE_MODE_MII:
1208 		if (!(ret & MII_ABILITY)) {
1209 			phydev_err(phydev, "mii mode not supported\n");
1210 			return -EINVAL;
1211 		}
1212 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1213 			      MII_BASIC_CONFIG_MII);
1214 		break;
1215 	case PHY_INTERFACE_MODE_REVMII:
1216 		if (!(ret & REVMII_ABILITY)) {
1217 			phydev_err(phydev, "rev-mii mode not supported\n");
1218 			return -EINVAL;
1219 		}
1220 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1221 			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
1222 		break;
1223 	case PHY_INTERFACE_MODE_RMII:
1224 		if (!(ret & RMII_ABILITY)) {
1225 			phydev_err(phydev, "rmii mode not supported\n");
1226 			return -EINVAL;
1227 		}
1228 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1229 			      MII_BASIC_CONFIG_RMII);
1230 		break;
1231 	case PHY_INTERFACE_MODE_SGMII:
1232 		if (!(ret & SGMII_ABILITY)) {
1233 			phydev_err(phydev, "sgmii mode not supported\n");
1234 			return -EINVAL;
1235 		}
1236 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1237 			      MII_BASIC_CONFIG_SGMII);
1238 		break;
1239 	case PHY_INTERFACE_MODE_INTERNAL:
1240 		break;
1241 	default:
1242 		return -EINVAL;
1243 	}
1244 
1245 	return 0;
1246 }
1247 
/* .config_init callback: bring the PHY into a usable state.
 *
 * Enters configuration mode, applies an errata workaround, enables the
 * statistics counters, programs the MII-side interface mode and static
 * PTP settings, then starts normal PHY operation.
 *
 * Returns 0 on success or a negative errno.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	/* Let the PHY handle its operating state automatically. */
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	/* Enable the statistics counters exposed via .get_stats. */
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* 100BASE-T1 has no autonegotiation. */
	phydev->autoneg = AUTONEG_DISABLE;

	/* Static PTP setup: 100BASE-T1 clock period, free-running LTC
	 * (hardware lock disabled), RX timestamp insertion mode 2 and
	 * the PTP port function enabled.
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK_PERIOD,
		      PTP_CLK_PERIOD_100BT1);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_HW_LTC_LOCK_CTRL,
			   HW_LTC_LOCK_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);

	return nxp_c45_start_op(phydev);
}
1295 
1296 static int nxp_c45_probe(struct phy_device *phydev)
1297 {
1298 	struct nxp_c45_phy *priv;
1299 	int ptp_ability;
1300 	int ret = 0;
1301 
1302 	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
1303 	if (!priv)
1304 		return -ENOMEM;
1305 
1306 	skb_queue_head_init(&priv->tx_queue);
1307 	skb_queue_head_init(&priv->rx_queue);
1308 
1309 	priv->phydev = phydev;
1310 
1311 	phydev->priv = priv;
1312 
1313 	mutex_init(&priv->ptp_lock);
1314 
1315 	ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1316 				   VEND1_PORT_ABILITIES);
1317 	ptp_ability = !!(ptp_ability & PTP_ABILITY);
1318 	if (!ptp_ability) {
1319 		phydev_dbg(phydev, "the phy does not support PTP");
1320 		goto no_ptp_support;
1321 	}
1322 
1323 	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
1324 	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
1325 		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
1326 		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
1327 		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
1328 		priv->mii_ts.ts_info = nxp_c45_ts_info;
1329 		phydev->mii_ts = &priv->mii_ts;
1330 		ret = nxp_c45_init_ptp_clock(priv);
1331 	} else {
1332 		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
1333 	}
1334 
1335 no_ptp_support:
1336 
1337 	return ret;
1338 }
1339 
/* .remove callback: unregister the PTP clock (if one was registered at
 * probe time) before dropping any skbs still queued for timestamping.
 * priv itself is devm-allocated and freed by the driver core.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
}
1350 
/* phylib driver table: callbacks for the NXP TJA1103 100BASE-T1 PHY. */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.features		= PHY_BASIC_T1_FEATURES,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= nxp_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= nxp_c45_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= nxp_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
};
1376 
module_phy_driver(nxp_c45_driver);

/* MDIO device ID table used for module autoloading. */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");
1389