1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3 * Copyright 2021-2025 NXP
4 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5 */
6
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/of.h>
14 #include <linux/phy.h>
15 #include <linux/processor.h>
16 #include <linux/property.h>
17 #include <linux/ptp_classify.h>
18 #include <linux/net_tstamp.h>
19
20 #include "nxp-c45-tja11xx.h"
21
22 #define PHY_ID_MASK GENMASK(31, 4)
23 /* Same id: TJA1103, TJA1104 */
24 #define PHY_ID_TJA_1103 0x001BB010
25 /* Same id: TJA1120, TJA1121 */
26 #define PHY_ID_TJA_1120 0x001BB031
27
28 #define VEND1_DEVICE_ID3 0x0004
29 #define TJA1120_DEV_ID3_SILICON_VERSION GENMASK(15, 12)
30 #define TJA1120_DEV_ID3_SAMPLE_TYPE GENMASK(11, 8)
31 #define DEVICE_ID3_SAMPLE_TYPE_R 0x9
32
33 #define VEND1_DEVICE_CONTROL 0x0040
34 #define DEVICE_CONTROL_RESET BIT(15)
35 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
36 #define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13)
37
38 #define VEND1_DEVICE_CONFIG 0x0048
39
40 #define TJA1120_VEND1_EXT_TS_MODE 0x1012
41
42 #define TJA1120_GLOBAL_INFRA_IRQ_ACK 0x2C08
43 #define TJA1120_GLOBAL_INFRA_IRQ_EN 0x2C0A
44 #define TJA1120_GLOBAL_INFRA_IRQ_STATUS 0x2C0C
45 #define TJA1120_DEV_BOOT_DONE BIT(1)
46
47 #define TJA1120_VEND1_PTP_TRIG_DATA_S 0x1070
48
49 #define TJA1120_EGRESS_TS_DATA_S 0x9060
50 #define TJA1120_EGRESS_TS_END 0x9067
51 #define TJA1120_TS_VALID BIT(0)
52 #define TJA1120_MORE_TS BIT(15)
53
54 #define VEND1_PHY_IRQ_ACK 0x80A0
55 #define VEND1_PHY_IRQ_EN 0x80A1
56 #define VEND1_PHY_IRQ_STATUS 0x80A2
57 #define PHY_IRQ_LINK_EVENT BIT(1)
58
59 #define VEND1_ALWAYS_ACCESSIBLE 0x801F
60 #define FUSA_PASS BIT(4)
61
62 #define VEND1_PHY_CONTROL 0x8100
63 #define PHY_CONFIG_EN BIT(14)
64 #define PHY_START_OP BIT(0)
65
66 #define VEND1_PHY_CONFIG 0x8108
67 #define PHY_CONFIG_AUTO BIT(0)
68
69 #define TJA1120_EPHY_RESETS 0x810A
70 #define EPHY_PCS_RESET BIT(3)
71
72 #define VEND1_SIGNAL_QUALITY 0x8320
73 #define SQI_VALID BIT(14)
74 #define SQI_MASK GENMASK(2, 0)
75 #define MAX_SQI SQI_MASK
76
77 #define CABLE_TEST_ENABLE BIT(15)
78 #define CABLE_TEST_START BIT(14)
79 #define CABLE_TEST_OK 0x00
80 #define CABLE_TEST_SHORTED 0x01
81 #define CABLE_TEST_OPEN 0x02
82 #define CABLE_TEST_UNKNOWN 0x07
83
84 #define VEND1_PORT_CONTROL 0x8040
85 #define PORT_CONTROL_EN BIT(14)
86
87 #define VEND1_PORT_ABILITIES 0x8046
88 #define MACSEC_ABILITY BIT(5)
89 #define PTP_ABILITY BIT(3)
90
91 #define VEND1_PORT_FUNC_IRQ_EN 0x807A
92 #define MACSEC_IRQS BIT(5)
93 #define PTP_IRQS BIT(3)
94
95 #define VEND1_PTP_IRQ_ACK 0x9008
96 #define EGR_TS_IRQ BIT(1)
97
98 #define VEND1_PORT_INFRA_CONTROL 0xAC00
99 #define PORT_INFRA_CONTROL_EN BIT(14)
100
101 #define VEND1_RXID 0xAFCC
102 #define VEND1_TXID 0xAFCD
103 #define ID_ENABLE BIT(15)
104
105 #define VEND1_ABILITIES 0xAFC4
106 #define RGMII_ID_ABILITY BIT(15)
107 #define RGMII_ABILITY BIT(14)
108 #define RMII_ABILITY BIT(10)
109 #define REVMII_ABILITY BIT(9)
110 #define MII_ABILITY BIT(8)
111 #define SGMII_ABILITY BIT(0)
112
113 #define VEND1_MII_BASIC_CONFIG 0xAFC6
114 #define MII_BASIC_CONFIG_REV BIT(4)
115 #define MII_BASIC_CONFIG_SGMII 0x9
116 #define MII_BASIC_CONFIG_RGMII 0x7
117 #define MII_BASIC_CONFIG_RMII 0x5
118 #define MII_BASIC_CONFIG_MII 0x4
119
120 #define VEND1_SGMII_BASIC_CONTROL 0xB000
121 #define SGMII_LPM BIT(11)
122
123 #define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351
124 #define EXTENDED_CNT_EN BIT(15)
125 #define VEND1_MONITOR_STATUS 0xAC80
126 #define MONITOR_RESET BIT(15)
127 #define VEND1_MONITOR_CONFIG 0xAC86
128 #define LOST_FRAMES_CNT_EN BIT(9)
129 #define ALL_FRAMES_CNT_EN BIT(8)
130
131 #define VEND1_SYMBOL_ERROR_COUNTER 0x8350
132 #define VEND1_LINK_DROP_COUNTER 0x8352
133 #define VEND1_LINK_LOSSES_AND_FAILURES 0x8353
134 #define VEND1_RX_PREAMBLE_COUNT 0xAFCE
135 #define VEND1_TX_PREAMBLE_COUNT 0xAFCF
136 #define VEND1_RX_IPG_LENGTH 0xAFD0
137 #define VEND1_TX_IPG_LENGTH 0xAFD1
138 #define COUNTER_EN BIT(15)
139
140 #define VEND1_PTP_CONFIG 0x1102
141 #define EXT_TRG_EDGE BIT(1)
142
143 #define TJA1120_SYNC_TRIG_FILTER 0x1010
144 #define PTP_TRIG_RISE_TS BIT(3)
145 #define PTP_TRIG_FALLING_TS BIT(2)
146
147 #define CLK_RATE_ADJ_LD BIT(15)
148 #define CLK_RATE_ADJ_DIR BIT(14)
149
150 #define VEND1_RX_TS_INSRT_CTRL 0x114D
151 #define TJA1103_RX_TS_INSRT_MODE2 0x02
152
153 #define TJA1120_RX_TS_INSRT_CTRL 0x9012
154 #define TJA1120_RX_TS_INSRT_EN BIT(15)
155 #define TJA1120_TS_INSRT_MODE BIT(4)
156
157 #define VEND1_EGR_RING_DATA_0 0x114E
158 #define VEND1_EGR_RING_CTRL 0x1154
159
160 #define RING_DATA_0_TS_VALID BIT(15)
161
162 #define RING_DONE BIT(0)
163
164 #define TS_SEC_MASK GENMASK(1, 0)
165
166 #define PTP_ENABLE BIT(3)
167 #define PHY_TEST_ENABLE BIT(0)
168
169 #define VEND1_PORT_PTP_CONTROL 0x9000
170 #define PORT_PTP_CONTROL_BYPASS BIT(11)
171
172 #define PTP_CLK_PERIOD_100BT1 15ULL
173 #define PTP_CLK_PERIOD_1000BT1 8ULL
174
175 #define EVENT_MSG_FILT_ALL 0x0F
176 #define EVENT_MSG_FILT_NONE 0x00
177
178 #define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40
179 #define GPIO_FUNC_EN BIT(15)
180 #define GPIO_FUNC_PTP BIT(6)
181 #define GPIO_SIGNAL_PTP_TRIGGER 0x01
182 #define GPIO_SIGNAL_PPS_OUT 0x12
183 #define GPIO_DISABLE 0
184 #define GPIO_PPS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
185 GPIO_SIGNAL_PPS_OUT)
186 #define GPIO_EXTTS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
187 GPIO_SIGNAL_PTP_TRIGGER)
188
189 #define RGMII_PERIOD_PS 8000U
190 #define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
191 #define MIN_ID_PS 1644U
192 #define MAX_ID_PS 2260U
193 #define DEFAULT_ID_PS 2000U
194
195 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
196 (ppb) * (ptp_clk_period), NSEC_PER_SEC)
197
198 #define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
199
200 #define TJA11XX_REVERSE_MODE BIT(0)
201
202 struct nxp_c45_phy;
203
/* Per-skb state stashed in skb->cb while a PTP packet waits for its
 * hardware timestamp.
 */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* parsed PTP header within the skb */
	unsigned int type;		/* packet type as passed to the timestamping callbacks */
};
208
209 #define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size) \
210 ((struct nxp_c45_reg_field) { \
211 .reg = _reg, \
212 .devad = _devad, \
213 .offset = _offset, \
214 .size = _size, \
215 })
216
/* Location of a bit field inside an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* bit offset of the field's LSB */
	u8 size;	/* field width in bits */
};
223
/* A hardware timestamp together with the PTP header fields the chip
 * captured alongside it, used to match the timestamp to a queued skb.
 */
struct nxp_c45_hwts {
	u32 nsec;		/* nanoseconds part of the timestamp */
	u32 sec;		/* low bits of the seconds counter (see TS_SEC_MASK) */
	u8 domain_number;	/* PTP domainNumber of the timestamped frame */
	u16 sequence_id;	/* PTP sequenceId of the timestamped frame */
	u8 msg_type;		/* PTP messageType of the timestamped frame */
};
231
/* Per-chip register map: the TJA1103 and TJA1120 families place these
 * registers and bit fields at different addresses, so each phy_driver
 * entry provides its own instance via nxp_c45_phy_data.
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;
	struct nxp_c45_reg_field ltc_write;
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;
	struct nxp_c45_reg_field sec_4_2;
	struct nxp_c45_reg_field nsec_15_0;
	struct nxp_c45_reg_field nsec_29_16;

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
277
/* One ethtool statistics counter: its name and the register field that
 * holds its value.
 */
struct nxp_c45_phy_stats {
	const char *name;
	const struct nxp_c45_reg_field counter;
};
282
/* Chip-family specific data and operations, attached to each phy_driver
 * entry as driver_data (see nxp_c45_get_data()).
 */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;	/* per-chip register layout */
	const struct nxp_c45_phy_stats *stats;	/* ethtool statistics table */
	int n_stats;				/* number of entries in @stats */
	u8 ptp_clk_period;			/* PTP clock period in ns */
	bool ext_ts_both_edges;			/* HW can timestamp both edges of EXTTS input */
	bool ack_ptp_irq;			/* PTP IRQ needs an explicit ack write */
	void (*counters_enable)(struct phy_device *phydev);
	/* Fetch one egress timestamp; returns false when none is pending. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	/* Fetch the latest external trigger timestamp, if valid. */
	bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
	void (*nmi_handler)(struct phy_device *phydev,
			    irqreturn_t *irq_status);
};
299
/* Return the chip-specific driver data attached to the matched phy_driver. */
static const
struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
{
	return phydev->drv->driver_data;
}
305
/* Return the chip-specific register map for this PHY. */
static const
struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);

	return phy_data->regmap;
}
313
nxp_c45_read_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)314 static int nxp_c45_read_reg_field(struct phy_device *phydev,
315 const struct nxp_c45_reg_field *reg_field)
316 {
317 u16 mask;
318 int ret;
319
320 if (reg_field->size == 0) {
321 phydev_err(phydev, "Trying to read a reg field of size 0.\n");
322 return -EINVAL;
323 }
324
325 ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
326 if (ret < 0)
327 return ret;
328
329 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
330 GENMASK(reg_field->offset + reg_field->size - 1,
331 reg_field->offset);
332 ret &= mask;
333 ret >>= reg_field->offset;
334
335 return ret;
336 }
337
/* Write @val into @reg_field using a read-modify-write of the containing
 * register.  Returns a negative errno on failure.
 */
static int nxp_c45_write_reg_field(struct phy_device *phydev,
				   const struct nxp_c45_reg_field *reg_field,
				   u16 val)
{
	u16 field_mask;
	u16 new_bits;

	if (!reg_field->size) {
		phydev_err(phydev, "Trying to write a reg field of size 0.\n");
		return -EINVAL;
	}

	if (reg_field->size == 1)
		field_mask = BIT(reg_field->offset);
	else
		field_mask = GENMASK(reg_field->offset + reg_field->size - 1,
				     reg_field->offset);
	new_bits = val << reg_field->offset;

	return phy_modify_mmd_changed(phydev, reg_field->devad,
				      reg_field->reg, field_mask, new_bits);
}
358
nxp_c45_set_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)359 static int nxp_c45_set_reg_field(struct phy_device *phydev,
360 const struct nxp_c45_reg_field *reg_field)
361 {
362 if (reg_field->size != 1) {
363 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
364 return -EINVAL;
365 }
366
367 return nxp_c45_write_reg_field(phydev, reg_field, 1);
368 }
369
nxp_c45_clear_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)370 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
371 const struct nxp_c45_reg_field *reg_field)
372 {
373 if (reg_field->size != 1) {
374 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
375 return -EINVAL;
376 }
377
378 return nxp_c45_write_reg_field(phydev, reg_field, 0);
379 }
380
/* True when the PHY has no usable interrupt line; egress timestamps must
 * then be polled from the PTP aux worker instead of being delivered via
 * the PTP IRQ.
 */
static bool nxp_c45_poll_txts(struct phy_device *phydev)
{
	return phydev->irq <= 0;
}
385
/* Read the PHY's local time counter (LTC) into @ts.  Callers take
 * priv->ptp_lock around this helper (see nxp_c45_ptp_gettimex64()).
 * @sts is part of the gettimex64 interface but is not filled in here.
 */
static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	/* Latch the running LTC into the read registers, then assemble the
	 * 32-bit nanoseconds and seconds values from their 16-bit halves
	 * (low word first, high word shifted into place).
	 */
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_nsec_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    regmap->vend1_ltc_rd_nsec_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  regmap->vend1_ltc_rd_sec_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_sec_1) << 16;

	return 0;
}
405
/* ptp_clock_info::gettimex64 - read the PHC time under priv->ptp_lock. */
static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
418
/* Program the PHY's local time counter (LTC) from @ts.  Callers take
 * priv->ptp_lock around this helper (see nxp_c45_ptp_settime64()).
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	/* Stage nanoseconds and seconds as 16-bit halves, then latch the
	 * staged value into the running LTC with the ltc_write bit.
	 */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
		      ts->tv_sec >> 16);
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);

	return 0;
}
437
/* ptp_clock_info::settime64 - set the PHC time under priv->ptp_lock. */
static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_settime64(ptp, ts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
449
/* ptp_clock_info::adjfine - adjust the PHC frequency.
 *
 * Converts @scaled_ppm to a sub-nanosecond-per-cycle increment and
 * programs it into the rate adjustment register pair.
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	/* The hardware takes a magnitude plus a direction bit. */
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	/* Write the low 16 bits first; the high word write carries the
	 * load (CLK_RATE_ADJ_LD) and direction (CLK_RATE_ADJ_DIR) bits
	 * that commit the new rate.
	 */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
480
/* ptp_clock_info::adjtime - shift the PHC by @delta nanoseconds using a
 * read-modify-write of the LTC under priv->ptp_lock.
 */
static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	struct timespec64 cur, offset;

	mutex_lock(&priv->ptp_lock);
	offset = ns_to_timespec64(delta);
	_nxp_c45_ptp_gettimex64(ptp, &cur, NULL);
	cur = timespec64_add(cur, offset);
	_nxp_c45_ptp_settime64(ptp, &cur);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
495
nxp_c45_reconstruct_ts(struct timespec64 * ts,struct nxp_c45_hwts * hwts)496 static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
497 struct nxp_c45_hwts *hwts)
498 {
499 ts->tv_nsec = hwts->nsec;
500 if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
501 ts->tv_sec -= TS_SEC_MASK + 1;
502 ts->tv_sec &= ~TS_SEC_MASK;
503 ts->tv_sec |= hwts->sec & TS_SEC_MASK;
504 }
505
nxp_c45_match_ts(struct ptp_header * header,struct nxp_c45_hwts * hwts,unsigned int type)506 static bool nxp_c45_match_ts(struct ptp_header *header,
507 struct nxp_c45_hwts *hwts,
508 unsigned int type)
509 {
510 return ntohs(header->sequence_id) == hwts->sequence_id &&
511 ptp_get_msgtype(header, type) == hwts->msg_type &&
512 header->domain_number == hwts->domain_number;
513 }
514
/* Read the latched external trigger timestamp into @extts and release the
 * hardware slot by writing RING_DONE.  Always reports success; validity
 * checking, where supported, is done by the caller (see tja1120_get_extts()).
 */
static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	/* Nanoseconds and seconds are each split across two 16-bit
	 * registers: low word first, then high word shifted into place.
	 */
	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       regmap->vend1_ext_trg_data_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_3) << 16;
	/* Tell the hardware the timestamp has been consumed. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_ext_trg_ctrl, RING_DONE);

	return true;
}
533
tja1120_extts_is_valid(struct phy_device * phydev)534 static bool tja1120_extts_is_valid(struct phy_device *phydev)
535 {
536 bool valid;
537 int reg;
538
539 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
540 TJA1120_VEND1_PTP_TRIG_DATA_S);
541 valid = !!(reg & TJA1120_TS_VALID);
542
543 return valid;
544 }
545
/* TJA1120 variant of get_extts: only reads the timestamp when the buffer
 * is marked valid, working around an engineering-sample quirk where a
 * pending FIFO entry must be manually popped into the buffer first.
 * Returns true when @extts was filled with a valid timestamp.
 */
static bool tja1120_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	/* MORE_TS flags additional timestamps waiting in the FIFO. */
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   regmap->vend1_ext_trg_ctrl);
	more_ts = !!(reg & TJA1120_MORE_TS);

	valid = tja1120_extts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_extts_out;

		/* Bug workaround for TJA1120 engineering samples: move the new
		 * timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      regmap->vend1_ext_trg_ctrl, RING_DONE);
		valid = tja1120_extts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_extts_out;
	}

	nxp_c45_get_extts(priv, extts);
tja1120_get_extts_out:
	return valid;
}
578
/* Read the pending egress timestamp and its matching PTP header fields
 * from the chip into @hwts.  Callers guarantee a valid timestamp is
 * latched (see nxp_c45_get_hwtxts()/tja1120_get_hwtxts()).
 */
static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
				   struct nxp_c45_hwts *hwts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;

	hwts->domain_number =
		nxp_c45_read_reg_field(phydev, &regmap->domain_number);
	hwts->msg_type =
		nxp_c45_read_reg_field(phydev, &regmap->msg_type);
	hwts->sequence_id =
		nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
	/* 30-bit nanoseconds and 5-bit seconds are spread over several
	 * register fields; reassemble them here.
	 */
	hwts->nsec =
		nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
	hwts->nsec |=
		nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
	hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
	hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;
}
598
/* TJA1103 get_egressts implementation: pop the next entry from the egress
 * timestamp ring and read it into @hwts.  Returns false when no valid
 * timestamp was available.  Serialized against other PHC accesses with
 * priv->ptp_lock.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	/* Advance the ring, then check the VALID bit of the exposed entry. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	nxp_c45_read_egress_ts(priv, hwts);
nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
618
tja1120_egress_ts_is_valid(struct phy_device * phydev)619 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
620 {
621 bool valid;
622 u16 reg;
623
624 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
625 valid = !!(reg & TJA1120_TS_VALID);
626
627 return valid;
628 }
629
/* TJA1120 get_egressts implementation: read the next egress timestamp
 * into @hwts, working around an engineering-sample quirk where a pending
 * FIFO entry must be manually popped into the buffer.  Returns false when
 * no valid timestamp was available.  Serialized with priv->ptp_lock.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	/* MORE_TS flags additional timestamps waiting in the FIFO. */
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	/* Mark the buffer entry consumed so the next one can be latched. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
662
/* Match a hardware egress timestamp against the queue of skbs waiting for
 * one, and complete the matching skb with the reconstructed timestamp.
 * Warns when no queued skb matches @txts.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	/* Walk the tx queue under its lock; unlink the first skb whose PTP
	 * header matches the captured timestamp metadata.
	 */
	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* The hardware only captures the low seconds bits; combine
		 * them with the current LTC time to get a full timestamp.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
697
/* PTP auxiliary worker: drains pending egress timestamps (when polling,
 * i.e. no PTP IRQ), completes queued rx skbs with their reconstructed
 * timestamps, and polls for external trigger events.  Returns a positive
 * delay to be rescheduled or a negative value to stop.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool ts_valid;
	u32 ts_raw;

	/* Match hardware egress timestamps to waiting tx skbs. */
	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		ts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!ts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	/* Rx timestamps were inserted by hardware into the reserved2 field
	 * of the PTP header (2 bits of seconds, 30 bits of nanoseconds);
	 * extract, reconstruct against the LTC, and deliver the skb.
	 */
	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	/* Poll external trigger timestamps; report only new events (the
	 * last delivered timestamp is cached in priv->extts_ts).
	 */
	if (priv->extts) {
		ts_valid = data->get_extts(priv, &ts);
		if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
749
/* Program the function configuration register of GPIO @pin with @pin_cfg. */
static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
				int pin, u16 pin_cfg)
{
	u16 cfg_reg = VEND1_GPIO_FUNC_CONFIG_BASE + pin;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, cfg_reg, pin_cfg);
}
758
/* Enable or disable the periodic output (PPS) on the GPIO pin assigned to
 * this perout channel.  The hardware only supports a fixed 1 s period
 * with a start time of 0; PTP_PEROUT_PHASE may select a 0 or 500 ms
 * phase via the polarity bit.  Returns 0 or a negative errno.
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	int pin;

	/* PTP_PEROUT_PHASE is the only supported request flag. */
	if (perout->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		/* Disable the PPS generator and release the pin. */
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_enable);
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_polarity);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		/* Only a half-second phase shift can be realized, by
		 * inverting the PPS polarity.
		 */
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		if (perout->phase.nsec == 0)
			nxp_c45_clear_reg_field(priv->phydev,
						&regmap->pps_polarity);
		else
			nxp_c45_set_reg_field(priv->phydev,
					      &regmap->pps_polarity);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);

	return 0;
}
819
/* TJA1103 family: select the external trigger capture edge via the single
 * EXT_TRG_EDGE bit (clear = rising, set = falling).  Note the order: when
 * both flags are set, the falling-edge write wins.
 */
static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
					  struct ptp_extts_request *extts)
{
	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
}
831
nxp_c45_set_rising_and_falling(struct phy_device * phydev,struct ptp_extts_request * extts)832 static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
833 struct ptp_extts_request *extts)
834 {
835 /* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
836 * this case external ts will be enabled on rising edge.
837 */
838 if (extts->flags & PTP_RISING_EDGE ||
839 extts->flags == PTP_ENABLE_FEATURE)
840 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
841 TJA1120_SYNC_TRIG_FILTER,
842 PTP_TRIG_RISE_TS);
843 else
844 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
845 TJA1120_SYNC_TRIG_FILTER,
846 PTP_TRIG_RISE_TS);
847
848 if (extts->flags & PTP_FALLING_EDGE)
849 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
850 TJA1120_SYNC_TRIG_FILTER,
851 PTP_TRIG_FALLING_TS);
852 else
853 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
854 TJA1120_SYNC_TRIG_FILTER,
855 PTP_TRIG_FALLING_TS);
856 }
857
/* Enable or disable external timestamping on the GPIO pin assigned to
 * this extts channel, and kick the aux worker that polls for events.
 * Returns 0 or a negative errno.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			      PTP_RISING_EDGE |
			      PTP_FALLING_EDGE |
			      PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE) &&
	    !data->ext_ts_both_edges)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	/* Edge selection differs per chip family (single edge-select bit
	 * vs. independent rising/falling enables).
	 */
	if (data->ext_ts_both_edges)
		nxp_c45_set_rising_and_falling(priv->phydev, extts);
	else
		nxp_c45_set_rising_or_falling(priv->phydev, extts);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Events are polled from the aux worker; start it now. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
899
nxp_c45_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * req,int on)900 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
901 struct ptp_clock_request *req, int on)
902 {
903 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
904
905 switch (req->type) {
906 case PTP_CLK_REQ_EXTTS:
907 return nxp_c45_extts_enable(priv, &req->extts, on);
908 case PTP_CLK_REQ_PEROUT:
909 return nxp_c45_perout_enable(priv, &req->perout, on);
910 default:
911 return -EOPNOTSUPP;
912 }
913 }
914
/* PTP pin descriptors for the 12 GPIOs; functions are assigned at runtime
 * through the ptp_clock pin interface (see nxp_c45_ptp_verify_pin()).
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
929
nxp_c45_ptp_verify_pin(struct ptp_clock_info * ptp,unsigned int pin,enum ptp_pin_function func,unsigned int chan)930 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
931 enum ptp_pin_function func, unsigned int chan)
932 {
933 if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
934 return -EINVAL;
935
936 switch (func) {
937 case PTP_PF_NONE:
938 case PTP_PF_PEROUT:
939 case PTP_PF_EXTTS:
940 break;
941 default:
942 return -EOPNOTSUPP;
943 }
944
945 return 0;
946 }
947
/* Populate the ptp_clock_info capabilities and register the PHC.
 * Returns 0, a negative errno, or -ENOMEM when PTP support is disabled
 * in the kernel (ptp_clock_register() returned NULL).
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	/* NULL means PTP clock support is not built in. */
	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
978
/* mii_timestamper::txtstamp - queue a transmitted PTP skb until its
 * hardware egress timestamp arrives; drop it when tx timestamping is off.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		/* Without a PTP IRQ the aux worker must poll for the
		 * egress timestamp.
		 */
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
1000
/* mii_timestamper::rxtstamp - defer a received PTP skb to the aux worker,
 * which extracts the inserted hardware timestamp and delivers it.
 * Returns true when the skb was taken, false to let the stack process it.
 */
static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct ptp_header *header = ptp_parse_header(skb, type);

	if (!header)
		return false;

	if (!priv->hwts_rx)
		return false;

	NXP_C45_SKB_CB(skb)->header = header;
	skb_queue_tail(&priv->rx_queue, skb);
	ptp_schedule_worker(priv->ptp_clock, 0);

	return true;
}
1020
/* mii_ts->hwtstamp callback: apply a new hardware timestamping
 * configuration requested from user space.
 *
 * Only HWTSTAMP_TX_OFF/ON and the L2 PTPv2 RX filters are supported;
 * anything else returns -ERANGE. The PTP block and the event message
 * filter are enabled whenever either direction is timestamping. The
 * egress-timestamp IRQ is only (un)configured on PHYs that do not
 * poll for TX timestamps.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	const struct nxp_c45_phy_data *data;

	if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	data = nxp_c45_get_data(phydev);
	priv->hwts_tx = cfg->tx_type;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		priv->hwts_rx = 1;
		/* Report back the broader filter actually applied */
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_ALL);
		data->ptp_enable(phydev, true);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_NONE);
		data->ptp_enable(phydev, false);
	}

	/* Polling PHYs retrieve TX timestamps from aux work, not IRQ */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
	else
		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);

nxp_c45_no_ptp_irq:
	return 0;
}
1073
nxp_c45_ts_info(struct mii_timestamper * mii_ts,struct kernel_ethtool_ts_info * ts_info)1074 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
1075 struct kernel_ethtool_ts_info *ts_info)
1076 {
1077 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1078 mii_ts);
1079
1080 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1081 SOF_TIMESTAMPING_RX_HARDWARE |
1082 SOF_TIMESTAMPING_RAW_HARDWARE;
1083 ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1084 ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1085 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1086 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1087 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1088 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
1089
1090 return 0;
1091 }
1092
/* Statistics counters common to all supported PHYs; read out by
 * nxp_c45_get_stats() via the register fields below.
 */
static const struct nxp_c45_phy_stats common_hw_stats[] = {
	{ "phy_link_status_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
	{ "phy_link_availability_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
	{ "phy_link_loss_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
	{ "phy_link_failure_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
	{ "phy_symbol_error_cnt",
		NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
};
1105
/* TJA1103/TJA1104-specific counters, appended after common_hw_stats */
static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
	{ "rx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
	{ "tx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
	{ "rx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
	{ "tx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
};
1116
/* TJA1120/TJA1121-specific counters, appended after common_hw_stats.
 * The "_xtd" entries hold the upper extension bits of the paired
 * 16-bit counter register that precedes them.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
		NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
		NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
		NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
		NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
		NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1137
nxp_c45_get_sset_count(struct phy_device * phydev)1138 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1139 {
1140 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1141
1142 return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1143 }
1144
/* ethtool: emit the statistics names, common counters first, then
 * the variant-specific ones, in the same order as nxp_c45_get_stats().
 */
static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
{
	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
	size_t total = nxp_c45_get_sset_count(phydev);
	size_t extra = total - ARRAY_SIZE(common_hw_stats);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(common_hw_stats); i++)
		ethtool_puts(&data, common_hw_stats[i].name);

	/* extra is 0 when phy_data is NULL, so no dereference happens */
	for (i = 0; i < extra; i++)
		ethtool_puts(&data, phy_data->stats[i].name);
}
1161
/* ethtool: read every statistics counter from the PHY registers.
 * A failed register read stores U64_MAX for that slot instead of
 * aborting the whole dump.
 */
static void nxp_c45_get_stats(struct phy_device *phydev,
			      struct ethtool_stats *stats, u64 *data)
{
	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
	size_t count = nxp_c45_get_sset_count(phydev);
	const struct nxp_c45_reg_field *field;
	size_t i;
	int val;

	for (i = 0; i < count; i++) {
		if (i < ARRAY_SIZE(common_hw_stats))
			field = &common_hw_stats[i].counter;
		else
			field = &phy_data->stats[i - ARRAY_SIZE(common_hw_stats)].counter;

		val = nxp_c45_read_reg_field(phydev, field);
		data[i] = val < 0 ? U64_MAX : (u64)val;
	}
}
1187
/* Unlock device configuration: enable global and per-block config
 * access, then enable the port, PHY and port-infra blocks.
 * The short sleep after the first write gives the device time to
 * apply the configuration-enable bits before the block enables.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
1204
/* Kick the PHY into operational mode after configuration is done */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1210
nxp_c45_config_intr(struct phy_device * phydev)1211 static int nxp_c45_config_intr(struct phy_device *phydev)
1212 {
1213 int ret;
1214
1215 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1216 ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1217 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1218 if (ret)
1219 return ret;
1220
1221 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1222 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1223 }
1224
1225 ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1226 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1227 if (ret)
1228 return ret;
1229
1230 return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1231 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1232 }
1233
tja1103_config_intr(struct phy_device * phydev)1234 static int tja1103_config_intr(struct phy_device *phydev)
1235 {
1236 int ret;
1237
1238 /* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
1239 ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
1240 FUSA_PASS);
1241 if (ret)
1242 return ret;
1243
1244 return nxp_c45_config_intr(phydev);
1245 }
1246
tja1120_config_intr(struct phy_device * phydev)1247 static int tja1120_config_intr(struct phy_device *phydev)
1248 {
1249 int ret;
1250
1251 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1252 ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1253 TJA1120_GLOBAL_INFRA_IRQ_EN,
1254 TJA1120_DEV_BOOT_DONE);
1255 else
1256 ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1257 TJA1120_GLOBAL_INFRA_IRQ_EN,
1258 TJA1120_DEV_BOOT_DONE);
1259 if (ret)
1260 return ret;
1261
1262 return nxp_c45_config_intr(phydev);
1263 }
1264
/* Threaded IRQ handler. Checks, in order: the link-event IRQ, the
 * egress-timestamp IRQ (draining all pending TX timestamps), then
 * delegates to the variant NMI handler and the MACsec handler, each
 * of which may upgrade the return value to IRQ_HANDLED.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp.
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		/* Drain every pending egress timestamp from the FIFO */
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	data->nmi_handler(phydev, &ret);
	nxp_c45_handle_macsec_interrupt(phydev, &ret);

	return ret;
}
1301
/* Issue a device soft reset and wait for it to complete.
 * After triggering the reset bit, sleep ~2 ms before polling; the
 * poll then waits for the self-clearing RESET bit, checking every
 * 20 ms with a 240 ms timeout.
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	usleep_range(2000, 2050);

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1318
nxp_c45_cable_test_start(struct phy_device * phydev)1319 static int nxp_c45_cable_test_start(struct phy_device *phydev)
1320 {
1321 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1322
1323 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1324 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1325 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1326 CABLE_TEST_ENABLE | CABLE_TEST_START);
1327 }
1328
/* Poll the cable test result.
 * Sets *finished to false while the test is still running. Once the
 * valid bit is set, reports the decoded result (OK/short/open) to
 * ethtool, tears down the test mode and restarts normal operation.
 *
 * Fix: two references were mis-encoded as "®map->" (a mangled
 * "&regmap->"), which does not compile; restored the correct
 * address-of expressions.
 */
static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
	int ret;
	u8 cable_test_result;

	ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
	if (!ret) {
		*finished = false;
		return 0;
	}

	*finished = true;
	cable_test_result = nxp_c45_read_reg_field(phydev,
						   &regmap->cable_test_result);

	switch (cable_test_result) {
	case CABLE_TEST_OK:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OK);
		break;
	case CABLE_TEST_SHORTED:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
		break;
	case CABLE_TEST_OPEN:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
		break;
	default:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
	}

	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
			   CABLE_TEST_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);

	return nxp_c45_start_op(phydev);
}
1371
nxp_c45_get_sqi(struct phy_device * phydev)1372 static int nxp_c45_get_sqi(struct phy_device *phydev)
1373 {
1374 int reg;
1375
1376 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1377 if (!(reg & SQI_VALID))
1378 return -EINVAL;
1379
1380 reg &= SQI_MASK;
1381
1382 return reg;
1383 }
1384
/* Called by phylib on every PHY state change. */
static void tja1120_link_change_notify(struct phy_device *phydev)
{
	/* Bug workaround for TJA1120 engineering samples: fix egress
	 * timestamps lost after link recovery by pulsing the embedded
	 * PHY PCS reset when the link goes down.
	 */
	if (phydev->state == PHY_NOLINK) {
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
	}
}
1397
/* Upper bound of the SQI scale reported by nxp_c45_get_sqi() */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1402
/* Validate an RGMII internal delay value (picoseconds) against the
 * supported [MIN_ID_PS, MAX_ID_PS] range. Returns 0 when in range,
 * -EINVAL (with an error log) otherwise.
 */
static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
{
	if (delay >= MIN_ID_PS && delay <= MAX_ID_PS)
		return 0;

	if (delay < MIN_ID_PS)
		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
	else
		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);

	return -EINVAL;
}
1417
/* Enable the common link-drop counter, then the variant-specific
 * counters via the per-chip callback.
 */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
1427
/* Program the PTP clock period, unlock the local time counter (LTC)
 * for writes, then run the variant-specific PTP initialization.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1439
/* Convert a delay expressed in degrees into the raw phase-offset
 * register value. The hardware delay in degrees is
 * 73.8 + raw * 0.9, so raw = (degrees - 73.8) / 0.9. Working in
 * tenths of a degree keeps this in integer arithmetic with one
 * decimal of precision: raw = (degrees * 10 - 738) / 9.
 */
static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
{
	return div_u64(phase_offset_raw * 10 - 738, 9);
}
1450
/* Turn off both TX and RX RGMII internal delay lines */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
1456
nxp_c45_set_delays(struct phy_device * phydev)1457 static void nxp_c45_set_delays(struct phy_device *phydev)
1458 {
1459 struct nxp_c45_phy *priv = phydev->priv;
1460 u64 tx_delay = priv->tx_delay;
1461 u64 rx_delay = priv->rx_delay;
1462 u64 degree;
1463
1464 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1465 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1466 degree = div_u64(tx_delay, PS_PER_DEGREE);
1467 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1468 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1469 } else {
1470 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1471 ID_ENABLE);
1472 }
1473
1474 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1475 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1476 degree = div_u64(rx_delay, PS_PER_DEGREE);
1477 phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1478 ID_ENABLE | nxp_c45_get_phase_shift(degree));
1479 } else {
1480 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1481 ID_ENABLE);
1482 }
1483 }
1484
nxp_c45_get_delays(struct phy_device * phydev)1485 static int nxp_c45_get_delays(struct phy_device *phydev)
1486 {
1487 struct nxp_c45_phy *priv = phydev->priv;
1488 int ret;
1489
1490 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1491 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1492 ret = device_property_read_u32(&phydev->mdio.dev,
1493 "tx-internal-delay-ps",
1494 &priv->tx_delay);
1495 if (ret)
1496 priv->tx_delay = DEFAULT_ID_PS;
1497
1498 ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1499 if (ret) {
1500 phydev_err(phydev,
1501 "tx-internal-delay-ps invalid value\n");
1502 return ret;
1503 }
1504 }
1505
1506 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1507 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1508 ret = device_property_read_u32(&phydev->mdio.dev,
1509 "rx-internal-delay-ps",
1510 &priv->rx_delay);
1511 if (ret)
1512 priv->rx_delay = DEFAULT_ID_PS;
1513
1514 ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1515 if (ret) {
1516 phydev_err(phydev,
1517 "rx-internal-delay-ps invalid value\n");
1518 return ret;
1519 }
1520 }
1521
1522 return 0;
1523 }
1524
/* Configure the MAC-side interface (MII/RMII/RGMII/SGMII variants)
 * according to phydev->interface, after checking the requested mode
 * against the abilities reported by the PHY. Returns -EINVAL for an
 * unsupported mode.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u16 basic_config;
	int ret;

	/* ret holds the abilities bitmap for the checks below */
	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}

		basic_config = MII_BASIC_CONFIG_RMII;

		/* This is not PHY_INTERFACE_MODE_REVRMII */
		if (priv->flags & TJA11XX_REVERSE_MODE)
			basic_config |= MII_BASIC_CONFIG_REV;

		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      basic_config);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1606
1607 /* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2 */
/* Apply the TJA1120/TJA1121 errata register sequence (see the errata
 * sheet referenced above). Only R-sample silicon is affected; which
 * write sequence applies depends on the MACsec ability and silicon
 * version. The raw register addresses/values below come straight
 * from the errata document and must be written in this exact order.
 */
static void nxp_c45_tja1120_errata(struct phy_device *phydev)
{
	bool macsec_ability, sgmii_ability;
	int silicon_version, sample_type;
	int phy_abilities;
	int ret = 0;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3);
	if (ret < 0)
		return;

	sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret);
	if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R)
		return;

	silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	sgmii_ability = !!(phy_abilities & SGMII_ABILITY);
	if ((!macsec_ability && silicon_version == 2) ||
	    (macsec_ability && silicon_version == 1)) {
		/* TJA1120/TJA1121 PHY configuration errata workaround.
		 * Apply PHY writes sequence before link up.
		 */
		if (!macsec_ability) {
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95);
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd);
		} else {
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7);
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893);
		}

		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0);

		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a);
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1);

		/* Close the unlock window opened via 0x01F8/0x01F9 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0);

		if (sgmii_ability) {
			/* TJA1120B/TJA1121B SGMII PCS restart errata workaround.
			 * Put SGMII PCS into power down mode and back up.
			 */
			phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					 VEND1_SGMII_BASIC_CONTROL,
					 SGMII_LPM);
			phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					   VEND1_SGMII_BASIC_CONTROL,
					   SGMII_LPM);
		}
	}
}
1663
nxp_c45_config_init(struct phy_device * phydev)1664 static int nxp_c45_config_init(struct phy_device *phydev)
1665 {
1666 int ret;
1667
1668 ret = nxp_c45_config_enable(phydev);
1669 if (ret) {
1670 phydev_err(phydev, "Failed to enable config\n");
1671 return ret;
1672 }
1673
1674 /* Bug workaround for SJA1110 rev B: enable write access
1675 * to MDIO_MMD_PMAPMD
1676 */
1677 phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
1678 phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
1679
1680 if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
1681 nxp_c45_tja1120_errata(phydev);
1682
1683 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
1684 PHY_CONFIG_AUTO);
1685
1686 ret = nxp_c45_set_phy_mode(phydev);
1687 if (ret)
1688 return ret;
1689
1690 phydev->autoneg = AUTONEG_DISABLE;
1691
1692 nxp_c45_counters_enable(phydev);
1693 nxp_c45_ptp_init(phydev);
1694 ret = nxp_c45_macsec_config_init(phydev);
1695 if (ret)
1696 return ret;
1697
1698 return nxp_c45_start_op(phydev);
1699 }
1700
/* phylib get_features: advertise twisted-pair and MII support on top
 * of the PMA abilities read from the standard C45 registers.
 */
static int nxp_c45_get_features(struct phy_device *phydev)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);

	return genphy_c45_pma_read_abilities(phydev);
}
1708
nxp_c45_parse_dt(struct phy_device * phydev)1709 static int nxp_c45_parse_dt(struct phy_device *phydev)
1710 {
1711 struct device_node *node = phydev->mdio.dev.of_node;
1712 struct nxp_c45_phy *priv = phydev->priv;
1713
1714 if (!IS_ENABLED(CONFIG_OF_MDIO))
1715 return 0;
1716
1717 if (of_property_read_bool(node, "nxp,rmii-refclk-out"))
1718 priv->flags |= TJA11XX_REVERSE_MODE;
1719
1720 return 0;
1721 }
1722
/* phylib probe: allocate and initialize driver state, then wire up
 * the optional PTP (mii_timestamper + PHC) and MACsec support,
 * depending on both the PHY's reported abilities and the kernel
 * configuration.
 */
static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	bool macsec_ability;
	int phy_abilities;
	bool ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	nxp_c45_parse_dt(phydev);

	mutex_init(&priv->ptp_lock);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	ptp_ability = !!(phy_abilities & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_dbg(phydev, "the phy does not support PTP");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		ret = nxp_c45_init_ptp_clock(priv);

		/* Timestamp selected by default to keep legacy API */
		phydev->default_timestamp = true;
	} else {
		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
	}

no_ptp_support:
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	if (!macsec_ability) {
		phydev_info(phydev, "the phy does not support MACsec\n");
		goto no_macsec_support;
	}

	if (IS_ENABLED(CONFIG_MACSEC)) {
		ret = nxp_c45_macsec_probe(phydev);
		phydev_dbg(phydev, "MACsec support enabled.");
	} else {
		phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
	}

no_macsec_support:

	return ret;
}
1787
/* phylib remove: unregister the PHC first so no more aux work can be
 * scheduled, then drop any skbs still queued for timestamping and
 * tear down MACsec state.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
	nxp_c45_macsec_remove(phydev);
}
1799
/* Enable the TJA1103-specific preamble and IPG counters, matching
 * the entries in tja1103_hw_stats[].
 */
static void tja1103_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);
}
1811
/* TJA1103 PTP setup: select the RX timestamp insertion mode and
 * enable the PTP port function.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1819
/* TJA1103 PTP on/off: the hardware exposes this as a bypass bit, so
 * enabling PTP means clearing the bypass and vice versa.
 */
static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (!enable)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PORT_PTP_CONTROL,
				 PORT_PTP_CONTROL_BYPASS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
}
1831
/* TJA1103 variant hook called from the shared IRQ handler: check for
 * a pending FUSA event, acknowledge it by writing the bit back, and
 * mark the interrupt handled.
 */
static void tja1103_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_ALWAYS_ACCESSIBLE);
	if (ret & FUSA_PASS) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      VEND1_ALWAYS_ACCESSIBLE,
			      FUSA_PASS);
		*irq_status = IRQ_HANDLED;
	}
}
1846
/* TJA1103/TJA1104 register layout: PTP clock/LTC control, egress
 * timestamp fields, external trigger data and cable test registers.
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period	= 0x1104,
	.vend1_event_msg_filt	= 0x1148,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0	= 0x1106,
	.vend1_ltc_wr_nsec_1	= 0x1107,
	.vend1_ltc_wr_sec_0	= 0x1108,
	.vend1_ltc_wr_sec_1	= 0x1109,
	.vend1_ltc_rd_nsec_0	= 0x110A,
	.vend1_ltc_rd_nsec_1	= 0x110B,
	.vend1_ltc_rd_sec_0	= 0x110C,
	.vend1_ltc_rd_sec_1	= 0x110D,
	.vend1_rate_adj_subns_0	= 0x110F,
	.vend1_rate_adj_subns_1	= 0x1110,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1121,
	.vend1_ext_trg_data_1	= 0x1122,
	.vend1_ext_trg_data_2	= 0x1123,
	.vend1_ext_trg_data_3	= 0x1124,
	.vend1_ext_trg_ctrl	= 0x1126,
	.cable_test		= 0x8330,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1899
/* TJA1103/TJA1104 variant description: register map, extra stats and
 * the per-chip callbacks used by the shared driver paths.
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	/* Egress TS IRQ is self-clearing on this part */
	.ack_ptp_irq = false,
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.get_extts = nxp_c45_get_extts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
	.nmi_handler = tja1103_nmi_handler,
};
1914
/* Enable the TJA1120-specific counters (extended symbol-error count
 * and the frame/lost-frame monitors), matching tja1120_hw_stats[].
 */
static void tja1120_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
			 EXTENDED_CNT_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
			 MONITOR_RESET);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
}
1924
/* TJA1120 PTP setup: configure RX and external timestamp insertion
 * modes, then enable the PTP block in the device configuration.
 */
static void tja1120_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
		      TJA1120_TS_INSRT_MODE);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
			 PTP_ENABLE);
}
1934
/* TJA1120 PTP on/off via the port-function PTP_ENABLE bit (direct
 * polarity, unlike the TJA1103 bypass bit).
 */
static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (!enable)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_FUNC_ENABLES,
				   PTP_ENABLE);
	else
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PORT_FUNC_ENABLES,
				 PTP_ENABLE);
}
1946
/* TJA1120 variant hook called from the shared IRQ handler: check for
 * and acknowledge the device-boot-done interrupt.
 */
static void tja1120_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   TJA1120_GLOBAL_INFRA_IRQ_STATUS);
	if (ret & TJA1120_DEV_BOOT_DONE) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_GLOBAL_INFRA_IRQ_ACK,
			      TJA1120_DEV_BOOT_DONE);
		*irq_status = IRQ_HANDLED;
	}
}
1961
nxp_c45_macsec_ability(struct phy_device * phydev)1962 static int nxp_c45_macsec_ability(struct phy_device *phydev)
1963 {
1964 bool macsec_ability;
1965 int phy_abilities;
1966
1967 phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1968 VEND1_PORT_ABILITIES);
1969 macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
1970
1971 return macsec_ability;
1972 }
1973
tja1103_match_phy_device(struct phy_device * phydev)1974 static int tja1103_match_phy_device(struct phy_device *phydev)
1975 {
1976 return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
1977 !nxp_c45_macsec_ability(phydev);
1978 }
1979
tja1104_match_phy_device(struct phy_device * phydev)1980 static int tja1104_match_phy_device(struct phy_device *phydev)
1981 {
1982 return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
1983 nxp_c45_macsec_ability(phydev);
1984 }
1985
tja1120_match_phy_device(struct phy_device * phydev)1986 static int tja1120_match_phy_device(struct phy_device *phydev)
1987 {
1988 return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
1989 !nxp_c45_macsec_ability(phydev);
1990 }
1991
tja1121_match_phy_device(struct phy_device * phydev)1992 static int tja1121_match_phy_device(struct phy_device *phydev)
1993 {
1994 return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
1995 nxp_c45_macsec_ability(phydev);
1996 }
1997
/* TJA1120 register map: vendor-register addresses and bit fields consumed
 * by the shared nxp-c45 core. All fields live in MDIO_MMD_VEND1.
 */
static const struct nxp_c45_regmap tja1120_regmap = {
	/* PTP local clock (LTC) control and access */
	.vend1_ptp_clk_period = 0x1020,
	.vend1_event_msg_filt = 0x9010,
	.pps_enable =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	.ltc_read =
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write =
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	.vend1_ltc_wr_nsec_0 = 0x1040,
	.vend1_ltc_wr_nsec_1 = 0x1041,
	.vend1_ltc_wr_sec_0 = 0x1042,
	.vend1_ltc_wr_sec_1 = 0x1043,
	.vend1_ltc_rd_nsec_0 = 0x1048,
	.vend1_ltc_rd_nsec_1 = 0x1049,
	.vend1_ltc_rd_sec_0 = 0x104A,
	.vend1_ltc_rd_sec_1 = 0x104B,
	.vend1_rate_adj_subns_0 = 0x1030,
	.vend1_rate_adj_subns_1 = 0x1031,
	/* Egress timestamp IRQ and captured timestamp fields */
	.irq_egr_ts_en =
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status =
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	.domain_number =
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type =
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id =
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0 =
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2 =
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0 =
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16 =
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	/* External trigger (extts) data/control registers */
	.vend1_ext_trg_data_0 = 0x1071,
	.vend1_ext_trg_data_1 = 0x1072,
	.vend1_ext_trg_data_2 = 0x1073,
	.vend1_ext_trg_data_3 = 0x1074,
	.vend1_ext_trg_ctrl = 0x1075,
	/* Cable test control/result registers */
	.cable_test = 0x8360,
	.cable_test_valid =
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result =
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
2050
/* Per-model data for the TJA1120/TJA1121, consumed by the shared
 * nxp-c45 core: register map, hardware statistics table, PTP clock
 * parameters, and the model-specific callbacks wired up below.
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	/* TJA1120 can timestamp both edges of an external trigger. */
	.ext_ts_both_edges = true,
	/* PTP IRQs on this model must be explicitly acknowledged. */
	.ack_ptp_irq = true,
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.get_extts = tja1120_get_extts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
	.nmi_handler = tja1120_nmi_handler,
};
2065
/* PHY driver table. TJA1103/TJA1104 share one PHY ID and so do
 * TJA1120/TJA1121; the entries are disambiguated at probe time by the
 * match_phy_device callbacks (MACsec-capable vs. not).
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		/* TJA1103: 100BASE-T1, no MACsec */
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja1103_match_phy_device,
	},
	{
		/* TJA1104: same ID as TJA1103, MACsec-capable variant */
		.name			= "NXP C45 TJA1104",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja1104_match_phy_device,
	},
	{
		/* TJA1120: 1000BASE-T1, no MACsec */
		.name			= "NXP C45 TJA1120",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja1120_match_phy_device,
	},
	{
		/* TJA1121: same ID as TJA1120, MACsec-capable variant */
		.name			= "NXP C45 TJA1121",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja1121_match_phy_device,
	},
};
2166
module_phy_driver(nxp_c45_driver);

/* MDIO bus match table: one entry per shared PHY ID; the variant-specific
 * match_phy_device callbacks select the exact driver entry.
 */
static const struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");