1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3 * Copyright 2021-2023 NXP
4 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5 */
6
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/of.h>
14 #include <linux/phy.h>
15 #include <linux/processor.h>
16 #include <linux/property.h>
17 #include <linux/ptp_classify.h>
18 #include <linux/net_tstamp.h>
19
20 #include "nxp-c45-tja11xx.h"
21
22 #define PHY_ID_TJA_1103 0x001BB010
23 #define PHY_ID_TJA_1120 0x001BB031
24
25 #define VEND1_DEVICE_ID3 0x0004
26 #define TJA1120_DEV_ID3_SILICON_VERSION GENMASK(15, 12)
27 #define TJA1120_DEV_ID3_SAMPLE_TYPE GENMASK(11, 8)
28 #define DEVICE_ID3_SAMPLE_TYPE_R 0x9
29
30 #define VEND1_DEVICE_CONTROL 0x0040
31 #define DEVICE_CONTROL_RESET BIT(15)
32 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
33 #define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13)
34
35 #define VEND1_DEVICE_CONFIG 0x0048
36
37 #define TJA1120_VEND1_EXT_TS_MODE 0x1012
38
39 #define TJA1120_GLOBAL_INFRA_IRQ_ACK 0x2C08
40 #define TJA1120_GLOBAL_INFRA_IRQ_EN 0x2C0A
41 #define TJA1120_GLOBAL_INFRA_IRQ_STATUS 0x2C0C
42 #define TJA1120_DEV_BOOT_DONE BIT(1)
43
44 #define TJA1120_VEND1_PTP_TRIG_DATA_S 0x1070
45
46 #define TJA1120_EGRESS_TS_DATA_S 0x9060
47 #define TJA1120_EGRESS_TS_END 0x9067
48 #define TJA1120_TS_VALID BIT(0)
49 #define TJA1120_MORE_TS BIT(15)
50
51 #define VEND1_PHY_IRQ_ACK 0x80A0
52 #define VEND1_PHY_IRQ_EN 0x80A1
53 #define VEND1_PHY_IRQ_STATUS 0x80A2
54 #define PHY_IRQ_LINK_EVENT BIT(1)
55
56 #define VEND1_ALWAYS_ACCESSIBLE 0x801F
57 #define FUSA_PASS BIT(4)
58
59 #define VEND1_PHY_CONTROL 0x8100
60 #define PHY_CONFIG_EN BIT(14)
61 #define PHY_START_OP BIT(0)
62
63 #define VEND1_PHY_CONFIG 0x8108
64 #define PHY_CONFIG_AUTO BIT(0)
65
66 #define TJA1120_EPHY_RESETS 0x810A
67 #define EPHY_PCS_RESET BIT(3)
68
69 #define VEND1_SIGNAL_QUALITY 0x8320
70 #define SQI_VALID BIT(14)
71 #define SQI_MASK GENMASK(2, 0)
72 #define MAX_SQI SQI_MASK
73
74 #define CABLE_TEST_ENABLE BIT(15)
75 #define CABLE_TEST_START BIT(14)
76 #define CABLE_TEST_OK 0x00
77 #define CABLE_TEST_SHORTED 0x01
78 #define CABLE_TEST_OPEN 0x02
79 #define CABLE_TEST_UNKNOWN 0x07
80
81 #define VEND1_PORT_CONTROL 0x8040
82 #define PORT_CONTROL_EN BIT(14)
83
84 #define VEND1_PORT_ABILITIES 0x8046
85 #define MACSEC_ABILITY BIT(5)
86 #define PTP_ABILITY BIT(3)
87
88 #define VEND1_PORT_FUNC_IRQ_EN 0x807A
89 #define MACSEC_IRQS BIT(5)
90 #define PTP_IRQS BIT(3)
91
92 #define VEND1_PTP_IRQ_ACK 0x9008
93 #define EGR_TS_IRQ BIT(1)
94
95 #define VEND1_PORT_INFRA_CONTROL 0xAC00
96 #define PORT_INFRA_CONTROL_EN BIT(14)
97
98 #define VEND1_RXID 0xAFCC
99 #define VEND1_TXID 0xAFCD
100 #define ID_ENABLE BIT(15)
101
102 #define VEND1_ABILITIES 0xAFC4
103 #define RGMII_ID_ABILITY BIT(15)
104 #define RGMII_ABILITY BIT(14)
105 #define RMII_ABILITY BIT(10)
106 #define REVMII_ABILITY BIT(9)
107 #define MII_ABILITY BIT(8)
108 #define SGMII_ABILITY BIT(0)
109
110 #define VEND1_MII_BASIC_CONFIG 0xAFC6
111 #define MII_BASIC_CONFIG_REV BIT(4)
112 #define MII_BASIC_CONFIG_SGMII 0x9
113 #define MII_BASIC_CONFIG_RGMII 0x7
114 #define MII_BASIC_CONFIG_RMII 0x5
115 #define MII_BASIC_CONFIG_MII 0x4
116
117 #define VEND1_SGMII_BASIC_CONTROL 0xB000
118 #define SGMII_LPM BIT(11)
119
120 #define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351
121 #define EXTENDED_CNT_EN BIT(15)
122 #define VEND1_MONITOR_STATUS 0xAC80
123 #define MONITOR_RESET BIT(15)
124 #define VEND1_MONITOR_CONFIG 0xAC86
125 #define LOST_FRAMES_CNT_EN BIT(9)
126 #define ALL_FRAMES_CNT_EN BIT(8)
127
128 #define VEND1_SYMBOL_ERROR_COUNTER 0x8350
129 #define VEND1_LINK_DROP_COUNTER 0x8352
130 #define VEND1_LINK_LOSSES_AND_FAILURES 0x8353
131 #define VEND1_RX_PREAMBLE_COUNT 0xAFCE
132 #define VEND1_TX_PREAMBLE_COUNT 0xAFCF
133 #define VEND1_RX_IPG_LENGTH 0xAFD0
134 #define VEND1_TX_IPG_LENGTH 0xAFD1
135 #define COUNTER_EN BIT(15)
136
137 #define VEND1_PTP_CONFIG 0x1102
138 #define EXT_TRG_EDGE BIT(1)
139
140 #define TJA1120_SYNC_TRIG_FILTER 0x1010
141 #define PTP_TRIG_RISE_TS BIT(3)
142 #define PTP_TRIG_FALLING_TS BIT(2)
143
144 #define CLK_RATE_ADJ_LD BIT(15)
145 #define CLK_RATE_ADJ_DIR BIT(14)
146
147 #define VEND1_RX_TS_INSRT_CTRL 0x114D
148 #define TJA1103_RX_TS_INSRT_MODE2 0x02
149
150 #define TJA1120_RX_TS_INSRT_CTRL 0x9012
151 #define TJA1120_RX_TS_INSRT_EN BIT(15)
152 #define TJA1120_TS_INSRT_MODE BIT(4)
153
154 #define VEND1_EGR_RING_DATA_0 0x114E
155 #define VEND1_EGR_RING_CTRL 0x1154
156
157 #define RING_DATA_0_TS_VALID BIT(15)
158
159 #define RING_DONE BIT(0)
160
161 #define TS_SEC_MASK GENMASK(1, 0)
162
163 #define PTP_ENABLE BIT(3)
164 #define PHY_TEST_ENABLE BIT(0)
165
166 #define VEND1_PORT_PTP_CONTROL 0x9000
167 #define PORT_PTP_CONTROL_BYPASS BIT(11)
168
169 #define PTP_CLK_PERIOD_100BT1 15ULL
170 #define PTP_CLK_PERIOD_1000BT1 8ULL
171
172 #define EVENT_MSG_FILT_ALL 0x0F
173 #define EVENT_MSG_FILT_NONE 0x00
174
175 #define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40
176 #define GPIO_FUNC_EN BIT(15)
177 #define GPIO_FUNC_PTP BIT(6)
178 #define GPIO_SIGNAL_PTP_TRIGGER 0x01
179 #define GPIO_SIGNAL_PPS_OUT 0x12
180 #define GPIO_DISABLE 0
181 #define GPIO_PPS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
182 GPIO_SIGNAL_PPS_OUT)
183 #define GPIO_EXTTS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
184 GPIO_SIGNAL_PTP_TRIGGER)
185
186 #define RGMII_PERIOD_PS 8000U
187 #define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
188 #define MIN_ID_PS 1644U
189 #define MAX_ID_PS 2260U
190 #define DEFAULT_ID_PS 2000U
191
192 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
193 (ppb) * (ptp_clk_period), NSEC_PER_SEC)
194
195 #define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
196
197 #define TJA11XX_REVERSE_MODE BIT(0)
198
struct nxp_c45_phy;

/* Per-skb state stashed in skb->cb while a PTP packet waits for its
 * hardware timestamp (see NXP_C45_SKB_CB()).
 */
struct nxp_c45_skb_cb {
	/* Parsed PTP header located inside the skb data. */
	struct ptp_header *header;
	/* Packet classification as passed to the rx/tx timestamp callbacks. */
	unsigned int type;
};
205
/* Build a struct nxp_c45_reg_field compound literal in place. */
#define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size) \
	((struct nxp_c45_reg_field) { \
		.reg = _reg, \
		.devad = _devad, \
		.offset = _offset, \
		.size = _size, \
	})

/* Describes a bit field inside an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* bit position of the field's LSB */
	u8 size;	/* field width in bits; 0 means "not present" */
};
220
/* A hardware timestamp together with the PTP header fields used to
 * match it against a queued skb (see nxp_c45_match_ts()).
 */
struct nxp_c45_hwts {
	u32 nsec;		/* nanoseconds part of the timestamp */
	u32 sec;		/* low bits of the seconds counter */
	u8 domain_number;	/* PTP domain from the packet header */
	u16 sequence_id;	/* PTP sequence id from the packet header */
	u8 msg_type;		/* PTP message type from the packet header */
};
228
/* Chip-specific register layout; one instance per supported PHY family
 * (TJA1103 vs TJA1120), selected through struct nxp_c45_phy_data.
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;
	struct nxp_c45_reg_field ltc_write;
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;
	struct nxp_c45_reg_field sec_4_2;
	struct nxp_c45_reg_field nsec_15_0;
	struct nxp_c45_reg_field nsec_29_16;

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
274
/* Name and counter register field of one ethtool PHY statistic. */
struct nxp_c45_phy_stats {
	const char *name;
	const struct nxp_c45_reg_field counter;
};
279
/* Per-family driver configuration and callbacks, referenced through
 * phydev->drv->driver_data (see nxp_c45_get_data()).
 */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;
	const struct nxp_c45_phy_stats *stats;
	int n_stats;
	u8 ptp_clk_period;	/* PTP clock period in ns */
	/* True when the HW can timestamp both edges of an external trigger. */
	bool ext_ts_both_edges;
	bool ack_ptp_irq;
	void (*counters_enable)(struct phy_device *phydev);
	/* Fetch one egress timestamp; returns false when none is valid. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	/* Fetch one external trigger timestamp; returns false when invalid. */
	bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
	void (*nmi_handler)(struct phy_device *phydev,
			    irqreturn_t *irq_status);
};
296
/* Return the per-family configuration attached to the PHY driver. */
static const
struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
{
	return phydev->drv->driver_data;
}
302
/* Return the chip-specific register map for this PHY. */
static const
struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);

	return phy_data->regmap;
}
310
nxp_c45_read_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)311 static int nxp_c45_read_reg_field(struct phy_device *phydev,
312 const struct nxp_c45_reg_field *reg_field)
313 {
314 u16 mask;
315 int ret;
316
317 if (reg_field->size == 0) {
318 phydev_err(phydev, "Trying to read a reg field of size 0.\n");
319 return -EINVAL;
320 }
321
322 ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
323 if (ret < 0)
324 return ret;
325
326 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
327 GENMASK(reg_field->offset + reg_field->size - 1,
328 reg_field->offset);
329 ret &= mask;
330 ret >>= reg_field->offset;
331
332 return ret;
333 }
334
/* Write @val into a bit field of an MMD register, leaving the other
 * bits untouched. Returns a negative errno on a zero-sized field.
 */
static int nxp_c45_write_reg_field(struct phy_device *phydev,
				   const struct nxp_c45_reg_field *reg_field,
				   u16 val)
{
	u16 mask;

	if (!reg_field->size) {
		phydev_err(phydev, "Trying to write a reg field of size 0.\n");
		return -EINVAL;
	}

	if (reg_field->size == 1)
		mask = BIT(reg_field->offset);
	else
		mask = GENMASK(reg_field->offset + reg_field->size - 1,
			       reg_field->offset);

	return phy_modify_mmd_changed(phydev, reg_field->devad,
				      reg_field->reg, mask,
				      val << reg_field->offset);
}
355
nxp_c45_set_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)356 static int nxp_c45_set_reg_field(struct phy_device *phydev,
357 const struct nxp_c45_reg_field *reg_field)
358 {
359 if (reg_field->size != 1) {
360 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
361 return -EINVAL;
362 }
363
364 return nxp_c45_write_reg_field(phydev, reg_field, 1);
365 }
366
nxp_c45_clear_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)367 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
368 const struct nxp_c45_reg_field *reg_field)
369 {
370 if (reg_field->size != 1) {
371 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
372 return -EINVAL;
373 }
374
375 return nxp_c45_write_reg_field(phydev, reg_field, 0);
376 }
377
nxp_c45_poll_txts(struct phy_device * phydev)378 static bool nxp_c45_poll_txts(struct phy_device *phydev)
379 {
380 return phydev->irq <= 0;
381 }
382
/* Read the local time counter (LTC). Caller must hold priv->ptp_lock.
 * The ltc_read bit latches the running counter into the read registers,
 * which are then read 16 bits at a time. @sts is currently unused.
 */
static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_nsec_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    regmap->vend1_ltc_rd_nsec_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  regmap->vend1_ltc_rd_sec_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_sec_1) << 16;

	return 0;
}
402
/* ptp_clock_info.gettimex64: locked wrapper around the LTC read. */
static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	/* Serialize LTC access against other PTP operations. */
	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
415
/* Set the local time counter. Caller must hold priv->ptp_lock.
 * The new value is staged in the write registers 16 bits at a time and
 * committed atomically by the trailing ltc_write bit.
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
		      ts->tv_sec >> 16);
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);

	return 0;
}
434
/* ptp_clock_info.settime64: locked wrapper around the LTC write. */
static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	/* Serialize LTC access against other PTP operations. */
	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_settime64(ptp, ts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
446
/* ptp_clock_info.adjfine: adjust the LTC rate by @scaled_ppm.
 * The magnitude is converted into a sub-nanosecond increment and written
 * as two 16-bit halves; the high half carries the load strobe
 * (CLK_RATE_ADJ_LD) and the direction bit (CLK_RATE_ADJ_DIR = speed up).
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	/* Low 16 bits first; the write of the high half commits the value. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
477
/* ptp_clock_info.adjtime: shift the LTC by @delta nanoseconds using a
 * read-modify-write of the counter under the PTP lock.
 */
static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	struct timespec64 offset = ns_to_timespec64(delta);
	struct timespec64 ltc;

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_gettimex64(ptp, &ltc, NULL);
	ltc = timespec64_add(ltc, offset);
	_nxp_c45_ptp_settime64(ptp, &ltc);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
492
nxp_c45_reconstruct_ts(struct timespec64 * ts,struct nxp_c45_hwts * hwts)493 static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
494 struct nxp_c45_hwts *hwts)
495 {
496 ts->tv_nsec = hwts->nsec;
497 if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
498 ts->tv_sec -= TS_SEC_MASK + 1;
499 ts->tv_sec &= ~TS_SEC_MASK;
500 ts->tv_sec |= hwts->sec & TS_SEC_MASK;
501 }
502
nxp_c45_match_ts(struct ptp_header * header,struct nxp_c45_hwts * hwts,unsigned int type)503 static bool nxp_c45_match_ts(struct ptp_header *header,
504 struct nxp_c45_hwts *hwts,
505 unsigned int type)
506 {
507 return ntohs(header->sequence_id) == hwts->sequence_id &&
508 ptp_get_msgtype(header, type) == hwts->msg_type &&
509 header->domain_number == hwts->domain_number;
510 }
511
/* Read the latched external trigger timestamp (nsec in data_0/1, sec in
 * data_2/3) and acknowledge it by writing RING_DONE. Always reports
 * success; validity is checked by the chip-specific callers.
 */
static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       regmap->vend1_ext_trg_data_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_3) << 16;
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_ext_trg_ctrl, RING_DONE);

	return true;
}
530
tja1120_extts_is_valid(struct phy_device * phydev)531 static bool tja1120_extts_is_valid(struct phy_device *phydev)
532 {
533 bool valid;
534 int reg;
535
536 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
537 TJA1120_VEND1_PTP_TRIG_DATA_S);
538 valid = !!(reg & TJA1120_TS_VALID);
539
540 return valid;
541 }
542
/* TJA1120 variant of get_extts: on engineering samples the FIFO may hold
 * a timestamp while the buffer reads as invalid, so a RING_DONE write is
 * used to pop the FIFO into the buffer before retrying once.
 * Returns true when a valid timestamp was read into @extts.
 */
static bool tja1120_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   regmap->vend1_ext_trg_ctrl);
	more_ts = !!(reg & TJA1120_MORE_TS);

	valid = tja1120_extts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_extts_out;

		/* Bug workaround for TJA1120 engineering samples: move the new
		 * timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      regmap->vend1_ext_trg_ctrl, RING_DONE);
		valid = tja1120_extts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_extts_out;
	}

	nxp_c45_get_extts(priv, extts);
tja1120_get_extts_out:
	return valid;
}
575
/* Read the currently exposed egress timestamp and the matching PTP
 * header fields from the chip-specific register fields into @hwts.
 */
static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
				   struct nxp_c45_hwts *hwts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;

	hwts->domain_number =
		nxp_c45_read_reg_field(phydev, &regmap->domain_number);
	hwts->msg_type =
		nxp_c45_read_reg_field(phydev, &regmap->msg_type);
	hwts->sequence_id =
		nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
	hwts->nsec =
		nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
	hwts->nsec |=
		nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
	/* Only 5 seconds bits are provided, split across two fields. */
	hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
	hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;
}
595
/* TJA1103 get_egressts callback: advance the egress timestamp ring
 * (RING_DONE) and read one entry if its valid bit is set.
 * Returns true when @hwts was filled with a valid timestamp.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	nxp_c45_read_egress_ts(priv, hwts);
nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
615
tja1120_egress_ts_is_valid(struct phy_device * phydev)616 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
617 {
618 bool valid;
619 u16 reg;
620
621 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
622 valid = !!(reg & TJA1120_TS_VALID);
623
624 return valid;
625 }
626
/* TJA1120 get_egressts callback. Like tja1120_get_extts(), engineering
 * samples may report an invalid buffer while the FIFO still holds data;
 * writing TS_VALID to the END register pops the next entry for a retry.
 * A consumed timestamp is acknowledged by clearing its valid bit.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	/* Acknowledge the timestamp so the next one can be exposed. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
659
/* Match the egress timestamp @txts against the queued TX skbs and
 * complete the matching one with skb_complete_tx_timestamp(). The queue
 * is walked under its lock; completion happens after the lock is
 * dropped. Warns if no queued skb matches.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* The HW timestamp is partial; rebuild it from the LTC. */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
694
/* PHC aux worker: drains egress timestamps (when polling, i.e. no IRQ),
 * attaches reconstructed timestamps to queued RX skbs, and reports
 * external trigger events. Returns 1 to reschedule, -1 to stop.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool ts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		ts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!ts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		/* The PHY inserted the raw RX timestamp (2 sec bits + 30 nsec
		 * bits) into the reserved2 field of the PTP header; combine it
		 * with the current LTC to get the full timestamp.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		/* Report only when the trigger timestamp actually changed. */
		ts_valid = data->get_extts(priv, &ts);
		if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
746
/* Program one GPIO pin's function register (one register per pin,
 * consecutive from VEND1_GPIO_FUNC_CONFIG_BASE).
 */
static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
				int pin, u16 pin_cfg)
{
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
}
755
/* Enable/disable the periodic output (PPS) on the requested pin.
 * The hardware PPS is fixed: 1 s period, start at second boundaries;
 * only a phase of 0 or 500 ms (via output polarity) is configurable.
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	int pin;

	/* Only the PHASE flag is supported. */
	if (perout->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_enable);
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_polarity);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		/* A 500 ms phase is realized by inverting the PPS polarity. */
		if (perout->phase.nsec == 0)
			nxp_c45_clear_reg_field(priv->phydev,
						&regmap->pps_polarity);
		else
			nxp_c45_set_reg_field(priv->phydev,
					      &regmap->pps_polarity);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);

	return 0;
}
816
/* Select which edge of the external trigger is timestamped (TJA1103
 * style: one edge only). EXT_TRG_EDGE clear = rising, set = falling.
 * Note the statement order: if both flags are given, falling edge wins.
 */
static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
					  struct ptp_extts_request *extts)
{
	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
}
828
/* Select the timestamped trigger edges (TJA1120 style: rising, falling,
 * or both) by programming the two independent filter bits.
 */
static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
					   struct ptp_extts_request *extts)
{
	/* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
	 * this case external ts will be enabled on rising edge.
	 */
	if (extts->flags & PTP_RISING_EDGE ||
	    extts->flags == PTP_ENABLE_FEATURE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_SYNC_TRIG_FILTER,
				 PTP_TRIG_RISE_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_SYNC_TRIG_FILTER,
				   PTP_TRIG_RISE_TS);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_SYNC_TRIG_FILTER,
				 PTP_TRIG_FALLING_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_SYNC_TRIG_FILTER,
				   PTP_TRIG_FALLING_TS);
}
854
/* Enable/disable external trigger timestamping on the requested pin.
 * Edge selection is delegated to the chip-appropriate helper; polling
 * for trigger events is done by the PHC aux worker, kicked here.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			      PTP_RISING_EDGE |
			      PTP_FALLING_EDGE |
			      PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE) &&
	    !data->ext_ts_both_edges)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	if (data->ext_ts_both_edges)
		nxp_c45_set_rising_and_falling(priv->phydev, extts);
	else
		nxp_c45_set_rising_or_falling(priv->phydev, extts);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* The aux worker polls for trigger timestamps while extts is on. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
896
nxp_c45_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * req,int on)897 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
898 struct ptp_clock_request *req, int on)
899 {
900 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
901
902 switch (req->type) {
903 case PTP_CLK_REQ_EXTTS:
904 return nxp_c45_extts_enable(priv, &req->extts, on);
905 case PTP_CLK_REQ_PEROUT:
906 return nxp_c45_perout_enable(priv, &req->perout, on);
907 default:
908 return -EOPNOTSUPP;
909 }
910 }
911
/* The 12 GPIO pins exposed as PTP pins; functions are assigned at
 * runtime through the .verify/.enable callbacks.
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
926
nxp_c45_ptp_verify_pin(struct ptp_clock_info * ptp,unsigned int pin,enum ptp_pin_function func,unsigned int chan)927 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
928 enum ptp_pin_function func, unsigned int chan)
929 {
930 if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
931 return -EINVAL;
932
933 switch (func) {
934 case PTP_PF_NONE:
935 case PTP_PF_PEROUT:
936 case PTP_PF_EXTTS:
937 break;
938 default:
939 return -EOPNOTSUPP;
940 }
941
942 return 0;
943 }
944
/* Fill in the PHC capabilities and register the PTP clock.
 * Returns 0, a registration error, or -ENOMEM when PTP support is
 * compiled out (ptp_clock_register() returned NULL).
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
975
/* mii_timestamper.txtstamp: queue an outgoing PTP packet until its
 * hardware egress timestamp is available; drop it when TX timestamping
 * is off. In polling mode the PHC worker is kicked to fetch timestamps.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
997
nxp_c45_rxtstamp(struct mii_timestamper * mii_ts,struct sk_buff * skb,int type)998 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
999 struct sk_buff *skb, int type)
1000 {
1001 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1002 mii_ts);
1003 struct ptp_header *header = ptp_parse_header(skb, type);
1004
1005 if (!header)
1006 return false;
1007
1008 if (!priv->hwts_rx)
1009 return false;
1010
1011 NXP_C45_SKB_CB(skb)->header = header;
1012 skb_queue_tail(&priv->rx_queue, skb);
1013 ptp_schedule_worker(priv->ptp_clock, 0);
1014
1015 return true;
1016 }
1017
/* mii_ts .hwtstamp callback: apply a kernel_hwtstamp_config.
 * Programs the PTP event message filter and enables/disables the PTP
 * block; the egress timestamp IRQ is only toggled on PHYs that do not
 * rely on polling for TX timestamps.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	const struct nxp_c45_phy_data *data;

	/* Only OFF and ON are supported; no one-step variants. */
	if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	data = nxp_c45_get_data(phydev);
	priv->hwts_tx = cfg->tx_type;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/* Hardware timestamps all L2 PTP event messages; report
		 * the broadest filter actually applied back to userspace.
		 */
		priv->hwts_rx = 1;
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_ALL);
		data->ptp_enable(phydev, true);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_NONE);
		data->ptp_enable(phydev, false);
	}

	/* Polled PHYs have no egress timestamp IRQ to (un)mask. */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
	else
		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);

nxp_c45_no_ptp_irq:
	return 0;
}
1070
nxp_c45_ts_info(struct mii_timestamper * mii_ts,struct kernel_ethtool_ts_info * ts_info)1071 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
1072 struct kernel_ethtool_ts_info *ts_info)
1073 {
1074 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1075 mii_ts);
1076
1077 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1078 SOF_TIMESTAMPING_RX_HARDWARE |
1079 SOF_TIMESTAMPING_RAW_HARDWARE;
1080 ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1081 ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1082 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1083 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1084 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1085 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
1086
1087 return 0;
1088 }
1089
/* ethtool statistics present on all supported PHYs (TJA1103 and TJA1120
 * families); device-specific counters follow in the per-PHY tables below.
 */
static const struct nxp_c45_phy_stats common_hw_stats[] = {
	{ "phy_link_status_drop_cnt",
	  NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
	{ "phy_link_availability_drop_cnt",
	  NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
	{ "phy_link_loss_cnt",
	  NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
	{ "phy_link_failure_cnt",
	  NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
	{ "phy_symbol_error_cnt",
	  NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
};
1102
/* TJA1103-specific ethtool statistics, appended after common_hw_stats. */
static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
	{ "rx_preamble_count",
	  NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
	{ "tx_preamble_count",
	  NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
	{ "rx_ipg_length",
	  NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
	{ "tx_ipg_length",
	  NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
};
1113
/* TJA1120-specific ethtool statistics, appended after common_hw_stats.
 * The "_xtd" entries hold the upper bits of the extended frame counters.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
	  NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
	  NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
	  NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
	  NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
	  NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1134
nxp_c45_get_sset_count(struct phy_device * phydev)1135 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1136 {
1137 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1138
1139 return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1140 }
1141
/* Emit the statistic names in the same order nxp_c45_get_stats() reports
 * values: common counters first, then the per-PHY ones.
 */
static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
{
	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
	size_t common = ARRAY_SIZE(common_hw_stats);
	size_t count = nxp_c45_get_sset_count(phydev);
	size_t i;

	for (i = 0; i < count; i++) {
		if (i < common)
			ethtool_puts(&data, common_hw_stats[i].name);
		else
			ethtool_puts(&data, phy_data->stats[i - common].name);
	}
}
1158
/* Read every counter register and fill @data in string order; a failed
 * MDIO read is reported as U64_MAX rather than aborting the dump.
 */
static void nxp_c45_get_stats(struct phy_device *phydev,
			      struct ethtool_stats *stats, u64 *data)
{
	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
	size_t common = ARRAY_SIZE(common_hw_stats);
	size_t count = nxp_c45_get_sset_count(phydev);
	const struct nxp_c45_reg_field *field;
	size_t i;
	int val;

	for (i = 0; i < count; i++) {
		if (i < common)
			field = &common_hw_stats[i].counter;
		else
			field = &phy_data->stats[i - common].counter;

		val = nxp_c45_read_reg_field(phydev, field);
		data[i] = val < 0 ? U64_MAX : val;
	}
}
1184
/* Unlock the device for configuration: enable global and per-port config
 * access, then enable the port, PHY and port-infra blocks. Always
 * returns 0; MDIO write errors are not propagated here.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	/* Give the device time to latch the config-enable bits. */
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
1201
/* Kick the PHY out of configuration mode and start normal operation. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1207
nxp_c45_config_intr(struct phy_device * phydev)1208 static int nxp_c45_config_intr(struct phy_device *phydev)
1209 {
1210 int ret;
1211
1212 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1213 ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1214 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1215 if (ret)
1216 return ret;
1217
1218 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1219 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1220 }
1221
1222 ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1223 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1224 if (ret)
1225 return ret;
1226
1227 return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1228 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1229 }
1230
/* TJA1103 .config_intr: acknowledge any pending FUSA status before
 * delegating to the common IRQ configuration.
 */
static int tja1103_config_intr(struct phy_device *phydev)
{
	int ret;

	/* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
			    FUSA_PASS);
	if (ret)
		return ret;

	return nxp_c45_config_intr(phydev);
}
1243
/* TJA1120 .config_intr: additionally gate the boot-done IRQ in the
 * global-infra block, then delegate to the common IRQ configuration.
 */
static int tja1120_config_intr(struct phy_device *phydev)
{
	int ret;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				       TJA1120_GLOBAL_INFRA_IRQ_EN,
				       TJA1120_DEV_BOOT_DONE);
	else
		ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					 TJA1120_GLOBAL_INFRA_IRQ_EN,
					 TJA1120_DEV_BOOT_DONE);
	if (ret)
		return ret;

	return nxp_c45_config_intr(phydev);
}
1261
/* Threaded IRQ handler shared by all supported PHYs. Services, in order:
 * link events, egress timestamp availability, device-specific NMI
 * sources, and MACsec events.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		/* Ack the link IRQ, then let phylib re-read link state. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp.
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		/* Drain every pending egress timestamp from the FIFO. */
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	data->nmi_handler(phydev, &ret);
	nxp_c45_handle_macsec_interrupt(phydev, &ret);

	return ret;
}
1298
/* .soft_reset implementation: issue a device reset and poll until the
 * self-clearing reset bit drops, or time out.
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	/* Let the device start the reset before the first status poll. */
	usleep_range(2000, 2050);

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1315
nxp_c45_cable_test_start(struct phy_device * phydev)1316 static int nxp_c45_cable_test_start(struct phy_device *phydev)
1317 {
1318 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1319
1320 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1321 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1322 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1323 CABLE_TEST_ENABLE | CABLE_TEST_START);
1324 }
1325
/* .cable_test_get_status implementation: report whether the diagnostic
 * finished and, if so, translate the hardware result code into an
 * ethtool cable-test result, then disable test mode and restart normal
 * operation.
 */
static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
	u8 cable_test_result;
	int ret;

	/* Fix mis-encoded "&regmap" (was corrupted to a '®' character). */
	ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
	if (!ret) {
		*finished = false;
		return 0;
	}

	*finished = true;
	cable_test_result = nxp_c45_read_reg_field(phydev,
						   &regmap->cable_test_result);

	switch (cable_test_result) {
	case CABLE_TEST_OK:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OK);
		break;
	case CABLE_TEST_SHORTED:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
		break;
	case CABLE_TEST_OPEN:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
		break;
	default:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
		break;
	}

	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
			   CABLE_TEST_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);

	return nxp_c45_start_op(phydev);
}
1368
nxp_c45_get_sqi(struct phy_device * phydev)1369 static int nxp_c45_get_sqi(struct phy_device *phydev)
1370 {
1371 int reg;
1372
1373 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1374 if (!(reg & SQI_VALID))
1375 return -EINVAL;
1376
1377 reg &= SQI_MASK;
1378
1379 return reg;
1380 }
1381
/* .link_change_notify implementation for TJA1120. */
static void tja1120_link_change_notify(struct phy_device *phydev)
{
	/* Bug workaround for TJA1120 engineering samples: fix egress
	 * timestamps lost after link recovery by pulsing the PCS reset.
	 */
	if (phydev->state == PHY_NOLINK) {
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
	}
}
1394
/* .get_sqi_max implementation: the SQI scale tops out at MAX_SQI. */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1399
/* Validate an RGMII internal delay value (picoseconds) against the
 * supported [MIN_ID_PS, MAX_ID_PS] range. Returns 0 when in range,
 * -EINVAL (with an error message) otherwise.
 */
static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
{
	if (delay >= MIN_ID_PS && delay <= MAX_ID_PS)
		return 0;

	if (delay < MIN_ID_PS)
		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
	else
		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);

	return -EINVAL;
}
1414
/* Enable the common link-drop counter, then the device-specific ones. */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
1424
/* Common PTP bring-up: program the LTC clock period, unlock the LTC for
 * writes, then run the device-specific PTP init.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1436
/* Convert a delay expressed in degrees into the raw phase-shift register
 * value. The delay in degree phase is 73.8 + raw * 0.9; work in tenths
 * of a degree to keep 1 decimal of precision in integer arithmetic.
 */
static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
{
	return div_u64(phase_offset_raw * 10 - 738, 9);
}
1447
/* Turn off both TX and RX RGMII internal delay lines. */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
1453
nxp_c45_set_delays(struct phy_device * phydev)1454 static void nxp_c45_set_delays(struct phy_device *phydev)
1455 {
1456 struct nxp_c45_phy *priv = phydev->priv;
1457 u64 tx_delay = priv->tx_delay;
1458 u64 rx_delay = priv->rx_delay;
1459 u64 degree;
1460
1461 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1462 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1463 degree = div_u64(tx_delay, PS_PER_DEGREE);
1464 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1465 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1466 } else {
1467 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1468 ID_ENABLE);
1469 }
1470
1471 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1472 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1473 degree = div_u64(rx_delay, PS_PER_DEGREE);
1474 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1475 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1476 } else {
1477 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1478 ID_ENABLE);
1479 }
1480 }
1481
/* Read the RGMII internal delays from device properties into priv,
 * falling back to DEFAULT_ID_PS when a property is absent, and validate
 * them. Returns 0 on success or -EINVAL for an out-of-range delay.
 */
static int nxp_c45_get_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	int ret;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "tx-internal-delay-ps",
					       &priv->tx_delay);
		if (ret)
			priv->tx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
		if (ret) {
			phydev_err(phydev,
				   "tx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "rx-internal-delay-ps",
					       &priv->rx_delay);
		if (ret)
			priv->rx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
		if (ret) {
			phydev_err(phydev,
				   "rx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	return 0;
}
1521
/* Program the MII basic configuration to match phydev->interface, after
 * checking the mode against the abilities advertised by the device.
 * Returns 0 on success or -EINVAL for an unsupported mode.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u16 basic_config;
	int ret;

	/* ret holds the abilities bitmap for the checks below. */
	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Internal delays come from device properties. */
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}

		basic_config = MII_BASIC_CONFIG_RMII;

		/* This is not PHY_INTERFACE_MODE_REVRMII */
		if (priv->flags & TJA11XX_REVERSE_MODE)
			basic_config |= MII_BASIC_CONFIG_REV;

		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      basic_config);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1603
1604 /* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2 */
/* Apply the TJA1120/TJA1121 errata write sequences. Only runs on R
 * sample types; the affected silicon version depends on whether the
 * part has MACsec. The register values are taken verbatim from the NXP
 * errata sheet and are intentionally undocumented magic.
 */
static void nxp_c45_tja1120_errata(struct phy_device *phydev)
{
	bool macsec_ability, sgmii_ability;
	int silicon_version, sample_type;
	int phy_abilities;
	int ret = 0;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3);
	if (ret < 0)
		return;

	sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret);
	if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R)
		return;

	silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	sgmii_ability = !!(phy_abilities & SGMII_ABILITY);
	if ((!macsec_ability && silicon_version == 2) ||
	    (macsec_ability && silicon_version == 1)) {
		/* TJA1120/TJA1121 PHY configuration errata workaround.
		 * Apply PHY writes sequence before link up.
		 */
		if (!macsec_ability) {
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95);
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd);
		} else {
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7);
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893);
		}

		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0);

		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a);
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1);

		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0);

		if (sgmii_ability) {
			/* TJA1120B/TJA1121B SGMII PCS restart errata workaround.
			 * Put SGMII PCS into power down mode and back up.
			 */
			phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					 VEND1_SGMII_BASIC_CONTROL,
					 SGMII_LPM);
			phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					   VEND1_SGMII_BASIC_CONTROL,
					   SGMII_LPM);
		}
	}
}
1660
/* .config_init implementation: unlock configuration, apply workarounds,
 * set the MAC interface mode, enable counters, initialize PTP and
 * MACsec, then start normal operation.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
		nxp_c45_tja1120_errata(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* 100/1000BASE-T1 has no autonegotiation. */
	phydev->autoneg = AUTONEG_DISABLE;

	nxp_c45_counters_enable(phydev);
	nxp_c45_ptp_init(phydev);
	ret = nxp_c45_macsec_config_init(phydev);
	if (ret)
		return ret;

	return nxp_c45_start_op(phydev);
}
1697
/* .get_features implementation: advertise TP and MII link modes on top
 * of whatever the PMA abilities report.
 */
static int nxp_c45_get_features(struct phy_device *phydev)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);

	return genphy_c45_pma_read_abilities(phydev);
}
1705
nxp_c45_parse_dt(struct phy_device * phydev)1706 static int nxp_c45_parse_dt(struct phy_device *phydev)
1707 {
1708 struct device_node *node = phydev->mdio.dev.of_node;
1709 struct nxp_c45_phy *priv = phydev->priv;
1710
1711 if (!IS_ENABLED(CONFIG_OF_MDIO))
1712 return 0;
1713
1714 if (of_property_read_bool(node, "nxp,rmii-refclk-out"))
1715 priv->flags |= TJA11XX_REVERSE_MODE;
1716
1717 return 0;
1718 }
1719
/* .probe implementation: allocate driver state and, depending on the
 * abilities the device advertises and on kernel config, set up PHY
 * timestamping (PHC + mii_timestamper) and MACsec offload.
 */
static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	bool macsec_ability;
	int phy_abilities;
	bool ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	/* Must run after phydev->priv is set; always returns 0. */
	nxp_c45_parse_dt(phydev);

	mutex_init(&priv->ptp_lock);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	ptp_ability = !!(phy_abilities & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_dbg(phydev, "the phy does not support PTP");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		ret = nxp_c45_init_ptp_clock(priv);

		/* Timestamp selected by default to keep legacy API */
		phydev->default_timestamp = true;
	} else {
		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
	}

no_ptp_support:
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	if (!macsec_ability) {
		phydev_info(phydev, "the phy does not support MACsec\n");
		goto no_macsec_support;
	}

	if (IS_ENABLED(CONFIG_MACSEC)) {
		ret = nxp_c45_macsec_probe(phydev);
		phydev_dbg(phydev, "MACsec support enabled.");
	} else {
		phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
	}

no_macsec_support:

	return ret;
}
1784
/* .remove implementation: unregister the PHC (if one was created), drop
 * any queued timestamping skbs, and tear down MACsec state.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
	nxp_c45_macsec_remove(phydev);
}
1796
tja1103_counters_enable(struct phy_device * phydev)1797 static void tja1103_counters_enable(struct phy_device *phydev)
1798 {
1799 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
1800 COUNTER_EN);
1801 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
1802 COUNTER_EN);
1803 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
1804 COUNTER_EN);
1805 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
1806 COUNTER_EN);
1807 }
1808
/* TJA1103 .ptp_init: select the RX timestamp insertion mode and enable
 * the PTP port function.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1816
/* TJA1103 .ptp_enable: the PTP datapath is active while the port PTP
 * bypass bit is cleared, so enabling means clearing BYPASS.
 */
static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (!enable)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PORT_PTP_CONTROL,
				 PORT_PTP_CONTROL_BYPASS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
}
1828
/* TJA1103 .nmi_handler: acknowledge a pending FUSA event and mark the
 * interrupt as handled.
 */
static void tja1103_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_ALWAYS_ACCESSIBLE);
	if (ret & FUSA_PASS) {
		/* Write-one-to-clear the FUSA status bit. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      VEND1_ALWAYS_ACCESSIBLE,
			      FUSA_PASS);
		*irq_status = IRQ_HANDLED;
	}
}
1843
/* TJA1103 register layout consumed by the common PTP/cable-test code. */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period	= 0x1104,
	.vend1_event_msg_filt	= 0x1148,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0	= 0x1106,
	.vend1_ltc_wr_nsec_1	= 0x1107,
	.vend1_ltc_wr_sec_0	= 0x1108,
	.vend1_ltc_wr_sec_1	= 0x1109,
	.vend1_ltc_rd_nsec_0	= 0x110A,
	.vend1_ltc_rd_nsec_1	= 0x110B,
	.vend1_ltc_rd_sec_0	= 0x110C,
	.vend1_ltc_rd_sec_1	= 0x110D,
	.vend1_rate_adj_subns_0	= 0x110F,
	.vend1_rate_adj_subns_1	= 0x1110,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1121,
	.vend1_ext_trg_data_1	= 0x1122,
	.vend1_ext_trg_data_2	= 0x1123,
	.vend1_ext_trg_data_3	= 0x1124,
	.vend1_ext_trg_ctrl	= 0x1126,
	.cable_test		= 0x8330,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1896
/* TJA1103 device description: regmap, stats and PTP quirks/callbacks.
 * TX timestamps are polled (no egress-TS IRQ ack needed on this part).
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	.ack_ptp_irq = false,
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.get_extts = nxp_c45_get_extts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
	.nmi_handler = tja1103_nmi_handler,
};
1911
/* TJA1120 .counters_enable: enable extended counters, reset the frame
 * monitor, then enable the all/lost frame counters.
 */
static void tja1120_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
			 EXTENDED_CNT_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
			 MONITOR_RESET);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
}
1921
/* TJA1120 .ptp_init: configure RX and external timestamp insertion
 * modes and enable the PTP block in the device config register.
 */
static void tja1120_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
		      TJA1120_TS_INSRT_MODE);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
			 PTP_ENABLE);
}
1931
/* TJA1120 .ptp_enable: unlike TJA1103, this part gates PTP through a
 * positive enable bit in the port function enables register.
 */
static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (!enable)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_FUNC_ENABLES,
				   PTP_ENABLE);
	else
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PORT_FUNC_ENABLES,
				 PTP_ENABLE);
}
1943
/* TJA1120 .nmi_handler: acknowledge the boot-done event and mark the
 * interrupt as handled.
 */
static void tja1120_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   TJA1120_GLOBAL_INFRA_IRQ_STATUS);
	if (ret & TJA1120_DEV_BOOT_DONE) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_GLOBAL_INFRA_IRQ_ACK,
			      TJA1120_DEV_BOOT_DONE);
		*irq_status = IRQ_HANDLED;
	}
}
1958
/* TJA1120 register layout consumed by the common PTP/cable-test code. */
static const struct nxp_c45_regmap tja1120_regmap = {
	.vend1_ptp_clk_period	= 0x1020,
	.vend1_event_msg_filt	= 0x9010,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	.vend1_ltc_wr_nsec_0	= 0x1040,
	.vend1_ltc_wr_nsec_1	= 0x1041,
	.vend1_ltc_wr_sec_0	= 0x1042,
	.vend1_ltc_wr_sec_1	= 0x1043,
	.vend1_ltc_rd_nsec_0	= 0x1048,
	.vend1_ltc_rd_nsec_1	= 0x1049,
	.vend1_ltc_rd_sec_0	= 0x104A,
	.vend1_ltc_rd_sec_1	= 0x104B,
	.vend1_rate_adj_subns_0	= 0x1030,
	.vend1_rate_adj_subns_1	= 0x1031,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1071,
	.vend1_ext_trg_data_1	= 0x1072,
	.vend1_ext_trg_data_2	= 0x1073,
	.vend1_ext_trg_data_3	= 0x1074,
	.vend1_ext_trg_ctrl	= 0x1075,
	.cable_test		= 0x8360,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
2011
/* Chip-specific data and callbacks handed to the common nxp-c45 core for
 * the TJA1120 (stats table, PTP parameters and the ops implemented above).
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	/* TJA1120 timestamps both edges of the external trigger signal */
	.ext_ts_both_edges = true,
	/* PTP IRQs on this chip must be explicitly acknowledged */
	.ack_ptp_irq = true,
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.get_extts = tja1120_get_extts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
	.nmi_handler = tja1120_nmi_handler,
};
2026
/* phylib driver entries: one per supported PHY model.  Both share the
 * generic nxp-c45 callbacks and differ in driver_data, interrupt config
 * and (for TJA1120) the link-change notifier.
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
		.name			= "NXP C45 TJA1120",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
};
2078
/* Register the driver array with phylib (generates module init/exit). */
module_phy_driver(nxp_c45_driver);

/* PHY ID table used by the MDIO bus for module autoloading. */
static const struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");