1 // SPDX-License-Identifier: GPL-2.0
2 /* Microchip KSZ PTP Implementation
3 *
4 * Copyright (C) 2020 ARRI Lighting
5 * Copyright (C) 2022 Microchip Technology Inc.
6 */
7
8 #include <linux/dsa/ksz_common.h>
9 #include <linux/irq.h>
10 #include <linux/irqdomain.h>
11 #include <linux/kernel.h>
12 #include <linux/ptp_classify.h>
13 #include <linux/ptp_clock_kernel.h>
14
15 #include "ksz_common.h"
16 #include "ksz_ptp.h"
17 #include "ksz_ptp_reg.h"
18
19 #define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps)
20 #define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data)
21 #define work_to_xmit_work(w) \
22 container_of((w), struct ksz_deferred_xmit_work, work)
23
/* Maximum frequency adjustment:
 * sub-nanoseconds-adj,max / sub-nanoseconds-per-ns / 40ns * 1s
 * = (2^30 - 1) / (2^32) / 40 ns * 1 s = 6249999
 */
27 #define KSZ_MAX_DRIFT_CORR 6249999
28 #define KSZ_MAX_PULSE_WIDTH 125000000LL
29
30 #define KSZ_PTP_INC_NS 40ULL /* HW clock is incremented every 40 ns (by 40) */
31 #define KSZ_PTP_SUBNS_BITS 32
32
33 #define KSZ_PTP_INT_START 13
34
/* Route the trigger output unit signal to the GPIO pins.  Only LAN937x
 * needs this pin setup; on other chips nothing is required.
 */
static int ksz_ptp_tou_gpio(struct ksz_device *dev)
{
	int ret;

	if (!is_lan937x(dev))
		return 0;

	/* Configure the pin as a GPIO output */
	ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT,
			GPIO_OUT);
	if (ret)
		return ret;

	/* Override the LED function on the pins shared with LED1/LED2 */
	ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2,
			LED_OVR_1 | LED_OVR_2);
	if (ret)
		return ret;

	/* Select the PTP GPIO signals as the source for those pins */
	return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4,
			 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2,
			 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2);
}
56
/* Reset one trigger output unit and clear its done/interrupt status
 * bits, leaving the unit disabled.
 *
 * @dev: switch device
 * @unit: index of the trigger output unit
 *
 * Returns 0 on success or a negative error code.
 */
static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit)
{
	u32 data;
	int ret;

	/* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */
	ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET);
	if (ret)
		return ret;

	/* Clear the unit's "trigger done" status bit */
	data = FIELD_PREP(TRIG_DONE_M, BIT(unit));
	ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data);
	if (ret)
		return ret;

	/* Clear the unit's pending trigger interrupt status */
	data = FIELD_PREP(TRIG_INT_M, BIT(unit));
	ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data);
	if (ret)
		return ret;

	/* Clear reset and set GPIO direction */
	return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE),
			 0);
}
79
/* Validate that a requested pulse width can be programmed: it must be
 * a multiple of 4 ns and, expressed in 8 ns hardware units, it must
 * fit into the TRIG_PULSE_WIDTH_M register field.
 */
static int ksz_ptp_tou_pulse_verify(u64 pulse_ns)
{
	u32 reg_val = pulse_ns / 8;

	if (pulse_ns & 0x3)
		return -EINVAL;

	return FIELD_FIT(TRIG_PULSE_WIDTH_M, reg_val) ? 0 : -ERANGE;
}
93
ksz_ptp_tou_target_time_set(struct ksz_device * dev,struct timespec64 const * ts)94 static int ksz_ptp_tou_target_time_set(struct ksz_device *dev,
95 struct timespec64 const *ts)
96 {
97 int ret;
98
99 /* Hardware has only 32 bit */
100 if ((ts->tv_sec & 0xffffffff) != ts->tv_sec)
101 return -EINVAL;
102
103 ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec);
104 if (ret)
105 return ret;
106
107 ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec);
108 if (ret)
109 return ret;
110
111 return 0;
112 }
113
/* Arm the trigger output unit and verify it started without error. */
static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit)
{
	u32 data;
	int ret;

	ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE);
	if (ret)
		return ret;

	/* Check error flag:
	 * - the ACTIVE flag is NOT cleared on an error!
	 */
	ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data);
	if (ret)
		return ret;

	if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) {
		dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__,
			unit);
		ret = -EIO;
		/* Unit will be reset on next access */
		return ret;
	}

	return 0;
}
140
/* Program one trigger output unit for a periodic output: positive
 * periodic pattern on GPIO @index, infinite cycle count, the given
 * cycle (period) and pulse widths, starting at @target_time.
 */
static int ksz_ptp_configure_perout(struct ksz_device *dev,
				    u32 cycle_width_ns, u32 pulse_width_ns,
				    struct timespec64 const *target_time,
				    u8 index)
{
	u32 data;
	int ret;

	data = FIELD_PREP(TRIG_NOTIFY, 1) |
		FIELD_PREP(TRIG_GPO_M, index) |
		FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD);
	ret = ksz_write32(dev, REG_TRIG_CTRL__4, data);
	if (ret)
		return ret;

	ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns);
	if (ret)
		return ret;

	/* Set cycle count 0 - Infinite */
	ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0);
	if (ret)
		return ret;

	/* Pulse width is programmed in units of 8 ns */
	data = (pulse_width_ns / 8);
	ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_target_time_set(dev, target_time);
	if (ret)
		return ret;

	return 0;
}
176
/* Enable or disable a periodic output signal on a trigger output unit.
 * Only the PTP_PEROUT_DUTY_CYCLE flag is supported; without it a duty
 * cycle of 50% (capped at the maximum hardware pulse width) is used.
 */
static int ksz_ptp_enable_perout(struct ksz_device *dev,
				 struct ptp_perout_request const *request,
				 int on)
{
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	u64 req_pulse_width_ns;
	u64 cycle_width_ns;
	u64 pulse_width_ns;
	int pin = 0;
	u32 data32;
	int ret;

	if (request->flags & ~PTP_PEROUT_DUTY_CYCLE)
		return -EOPNOTSUPP;

	/* Refuse if the trigger output is claimed by another function */
	if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT &&
	    ptp_data->tou_mode != KSZ_PTP_TOU_IDLE)
		return -EBUSY;

	pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index);
	if (pin < 0)
		return -EINVAL;

	/* Select which GPIO pin and trigger output unit to operate on */
	data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) |
		 FIELD_PREP(PTP_TOU_INDEX, request->index);
	ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4,
			PTP_GPIO_INDEX | PTP_TOU_INDEX, data32);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_reset(dev, request->index);
	if (ret)
		return ret;

	if (!on) {
		/* The reset above already stopped the output */
		ptp_data->tou_mode = KSZ_PTP_TOU_IDLE;
		return 0;
	}

	/* Remember the request so the output can be re-armed after the
	 * clock is stepped (settime/adjtime).
	 */
	ptp_data->perout_target_time_first.tv_sec = request->start.sec;
	ptp_data->perout_target_time_first.tv_nsec = request->start.nsec;

	ptp_data->perout_period.tv_sec = request->period.sec;
	ptp_data->perout_period.tv_nsec = request->period.nsec;

	cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period);
	if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns)
		return -EINVAL;

	if (request->flags & PTP_PEROUT_DUTY_CYCLE) {
		pulse_width_ns = request->on.sec * NSEC_PER_SEC +
			request->on.nsec;
	} else {
		/* Use a duty cycle of 50%. Maximum pulse width supported by the
		 * hardware is a little bit more than 125 ms.
		 */
		req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC +
				      request->period.nsec) / 2;
		pulse_width_ns = min_t(u64, req_pulse_width_ns,
				       KSZ_MAX_PULSE_WIDTH);
	}

	ret = ksz_ptp_tou_pulse_verify(pulse_width_ns);
	if (ret)
		return ret;

	ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns,
				       &ptp_data->perout_target_time_first,
				       pin);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_gpio(dev);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_start(dev, request->index);
	if (ret)
		return ret;

	ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT;

	return 0;
}
261
ksz_ptp_enable_mode(struct ksz_device * dev)262 static int ksz_ptp_enable_mode(struct ksz_device *dev)
263 {
264 struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
265 struct ksz_ptp_data *ptp_data = &dev->ptp_data;
266 const u16 *regs = dev->info->regs;
267 struct ksz_port *prt;
268 struct dsa_port *dp;
269 bool tag_en = false;
270
271 dsa_switch_for_each_user_port(dp, dev->ds) {
272 prt = &dev->ports[dp->index];
273 if (prt->hwts_tx_en || prt->hwts_rx_en) {
274 tag_en = true;
275 break;
276 }
277 }
278
279 if (tag_en) {
280 ptp_schedule_worker(ptp_data->clock, 0);
281 } else {
282 ptp_cancel_worker_sync(ptp_data->clock);
283 }
284
285 tagger_data->hwtstamp_set_state(dev->ds, tag_en);
286
287 return ksz_rmw16(dev, regs[PTP_MSG_CONF1], PTP_ENABLE,
288 tag_en ? PTP_ENABLE : 0);
289 }
290
/* Report the hardware timestamping capabilities of the switch when
 * queried through the ethtool -T <interface> utility.
 */
int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_info *ts)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_ptp_data *ptp_data;

	ptp_data = &dev->ptp_data;

	/* Without a registered PTP clock there is nothing to report */
	if (!ptp_data->clock)
		return -ENODEV;

	ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
			      SOF_TIMESTAMPING_RX_HARDWARE |
			      SOF_TIMESTAMPING_RAW_HARDWARE;

	ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P);

	/* Two-step TX timestamping is only offered on LAN937x */
	if (is_lan937x(dev))
		ts->tx_types |= BIT(HWTSTAMP_TX_ON);

	ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	ts->phc_index = ptp_clock_index(ptp_data->clock);

	return 0;
}
322
ksz_hwtstamp_get(struct dsa_switch * ds,int port,struct kernel_hwtstamp_config * config)323 int ksz_hwtstamp_get(struct dsa_switch *ds, int port,
324 struct kernel_hwtstamp_config *config)
325 {
326 struct ksz_device *dev = ds->priv;
327 struct ksz_port *prt;
328
329 prt = &dev->ports[port];
330 *config = prt->tstamp_config;
331
332 return 0;
333 }
334
/* Translate a hwtstamp configuration into per-message time stamp
 * interrupt enables and the one-step/two-step hardware mode, then
 * enable or disable PTP processing accordingly.
 *
 * config->rx_filter is updated to the filter actually applied.
 * Returns 0 on success or a negative error code.
 */
static int ksz_set_hwtstamp_config(struct ksz_device *dev,
				   struct ksz_port *prt,
				   struct kernel_hwtstamp_config *config)
{
	const u16 *regs = dev->info->regs;
	int ret;

	if (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
		prt->hwts_tx_en = false;
		break;
	case HWTSTAMP_TX_ONESTEP_P2P:
		/* Only PDelay_Req egress time stamps are reported;
		 * Sync/PDelay_Resp are handled by the one-step hardware
		 * (see ksz_port_rxtstamp()/ksz_port_txtstamp()).
		 */
		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
		prt->hwts_tx_en = true;

		ret = ksz_rmw16(dev, regs[PTP_MSG_CONF1], PTP_1STEP, PTP_1STEP);
		if (ret)
			return ret;

		break;
	case HWTSTAMP_TX_ON:
		/* Two-step timestamping is only available on LAN937x */
		if (!is_lan937x(dev))
			return -ERANGE;

		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
		prt->hwts_tx_en = true;

		ret = ksz_rmw16(dev, regs[PTP_MSG_CONF1], PTP_1STEP, 0);
		if (ret)
			return ret;

		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		prt->hwts_rx_en = false;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		prt->hwts_rx_en = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		prt->hwts_rx_en = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		prt->hwts_rx_en = true;
		break;
	default:
		config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	return ksz_ptp_enable_mode(dev);
}
407
ksz_hwtstamp_set(struct dsa_switch * ds,int port,struct kernel_hwtstamp_config * config,struct netlink_ext_ack * extack)408 int ksz_hwtstamp_set(struct dsa_switch *ds, int port,
409 struct kernel_hwtstamp_config *config,
410 struct netlink_ext_ack *extack)
411 {
412 struct ksz_device *dev = ds->priv;
413 struct ksz_port *prt;
414 int ret;
415
416 prt = &dev->ports[port];
417
418 ret = ksz_set_hwtstamp_config(dev, prt, config);
419 if (ret)
420 return ret;
421
422 prt->tstamp_config = *config;
423
424 return 0;
425 }
426
/* Reconstruct a full timestamp from a partial hardware time stamp,
 * which carries only the nanoseconds plus the two least significant
 * bits of the seconds (hence the "& ~3" merge and the +/-4 s
 * correction).  The missing upper bits are taken from the tracked
 * clock time, choosing the representation nearest to "now".
 */
static ktime_t ksz_tstamp_reconstruct(struct ksz_device *dev, ktime_t tstamp)
{
	struct timespec64 ptp_clock_time;
	struct ksz_ptp_data *ptp_data;
	struct timespec64 diff;
	struct timespec64 ts;

	ptp_data = &dev->ptp_data;
	ts = ktime_to_timespec64(tstamp);

	/* Take a consistent snapshot of the tracked clock time */
	spin_lock_bh(&ptp_data->clock_lock);
	ptp_clock_time = ptp_data->clock_time;
	spin_unlock_bh(&ptp_data->clock_lock);

	/* calculate full time from partial time stamp */
	ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec;

	/* find nearest possible point in time */
	diff = timespec64_sub(ts, ptp_clock_time);
	if (diff.tv_sec > 2)
		ts.tv_sec -= 4;
	else if (diff.tv_sec < -2)
		ts.tv_sec += 4;

	return timespec64_to_ktime(ts);
}
453
/* Attach the (reconstructed) RX hardware time stamp to the skb.  In
 * one-step P2P mode, additionally subtract the partial ingress time
 * stamp of PDelay_Req messages from the correction field.
 *
 * Always returns false so the skb is delivered to the stack normally.
 */
bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
		       unsigned int type)
{
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
	struct ksz_device *dev = ds->priv;
	struct ptp_header *ptp_hdr;
	struct ksz_port *prt;
	u8 ptp_msg_type;
	ktime_t tstamp;
	s64 correction;

	prt = &dev->ports[port];

	/* The received time stamp is partial; extend it to a full one
	 * using the tracked clock time.
	 */
	tstamp = KSZ_SKB_CB(skb)->tstamp;
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp);

	if (prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P)
		goto out;

	ptp_hdr = ptp_parse_header(skb, type);
	if (!ptp_hdr)
		goto out;

	ptp_msg_type = ptp_get_msgtype(ptp_hdr, type);
	if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ)
		goto out;

	/* Only subtract the partial time stamp from the correction field. When
	 * the hardware adds the egress time stamp to the correction field of
	 * the PDelay_Resp message on tx, also only the partial time stamp will
	 * be added.
	 */
	correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
	correction -= ktime_to_ns(tstamp) << 16;

	ptp_header_update_correction(skb, type, ptp_hdr, correction);

out:
	return false;
}
495
/* Decide how a transmitted PTP message is time stamped.  In one-step
 * P2P mode, PDelay_Resp messages are only marked for correction-field
 * update and Sync messages get no clone at all; all other
 * timestampable messages get a clone which tag_ksz.c uses to report
 * the egress time stamp.
 */
void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
	struct ksz_device *dev = ds->priv;
	struct ptp_header *hdr;
	struct sk_buff *clone;
	struct ksz_port *prt;
	unsigned int type;
	u8 ptp_msg_type;

	prt = &dev->ports[port];

	if (!prt->hwts_tx_en)
		return;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return;

	ptp_msg_type = ptp_get_msgtype(hdr, type);

	switch (ptp_msg_type) {
	case PTP_MSGTYPE_SYNC:
		/* No time stamp report for Sync in one-step mode */
		if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P)
			return;
		break;
	case PTP_MSGTYPE_PDELAY_REQ:
		break;
	case PTP_MSGTYPE_PDELAY_RESP:
		if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) {
			KSZ_SKB_CB(skb)->ptp_type = type;
			KSZ_SKB_CB(skb)->update_correction = true;
			return;
		}
		break;

	default:
		return;
	}

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	/* caching the value to be used in tag_ksz.c */
	KSZ_SKB_CB(skb)->clone = clone;
}
546
/* Wait (up to 100 ms) for the egress time stamp of the message that
 * belongs to the cloned @skb and report it to the socket error queue.
 * On timeout the function returns without completing the time stamp.
 */
static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
				 struct ksz_port *prt, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps hwtstamps = {};
	int ret;

	/* timeout must include DSA conduit to transmit data, tstamp latency,
	 * IRQ latency and time for reading the time stamp.
	 */
	ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
					  msecs_to_jiffies(100));
	if (!ret)
		return;

	hwtstamps.hwtstamp = prt->tstamp_msg;
	skb_complete_tx_timestamp(skb, &hwtstamps);
}
564
/* Deferred transmit worker: send the frame, then wait for its egress
 * time stamp and report it via the previously created clone.
 */
void ksz_port_deferred_xmit(struct kthread_work *work)
{
	struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct sk_buff *clone, *skb = xmit_work->skb;
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct ksz_device *dev = ds->priv;
	struct ksz_port *prt;

	prt = &dev->ports[xmit_work->dp->index];

	clone = KSZ_SKB_CB(skb)->clone;

	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Make sure a stale completion of a previous frame is not seen */
	reinit_completion(&prt->tstamp_msg_comp);

	dsa_enqueue_skb(skb, skb->dev);

	ksz_ptp_txtstamp_skb(dev, prt, clone);

	kfree(xmit_work);
}
587
/* Read the current PTP clock from hardware.  Caller must hold
 * ptp_data->lock.
 */
static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
{
	const u16 *regs = dev->info->regs;
	u32 nanoseconds;
	u32 seconds;
	u8 phase;
	int ret;

	/* Copy current PTP clock into shadow registers and read */
	ret = ksz_rmw16(dev, regs[PTP_CLK_CTRL], PTP_READ_TIME, PTP_READ_TIME);
	if (ret)
		return ret;

	ret = ksz_read8(dev, regs[PTP_RTC_SUB_NANOSEC], &phase);
	if (ret)
		return ret;

	ret = ksz_read32(dev, regs[PTP_RTC_NANOSEC], &nanoseconds);
	if (ret)
		return ret;

	ret = ksz_read32(dev, regs[PTP_RTC_SEC], &seconds);
	if (ret)
		return ret;

	ts->tv_sec = seconds;
	/* the sub-nanosecond register counts in 8 ns phase steps */
	ts->tv_nsec = nanoseconds + phase * 8;

	return 0;
}
618
ksz_ptp_gettime(struct ptp_clock_info * ptp,struct timespec64 * ts)619 static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
620 {
621 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
622 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
623 int ret;
624
625 mutex_lock(&ptp_data->lock);
626 ret = _ksz_ptp_gettime(dev, ts);
627 mutex_unlock(&ptp_data->lock);
628
629 return ret;
630 }
631
/* Re-arm the periodic output after the clock has been stepped.  The
 * next trigger time is realigned to the originally requested start
 * time and period, keeping a 100 ms guard interval ahead of the
 * current clock time.  Caller must hold ptp_data->lock.
 */
static int ksz_ptp_restart_perout(struct ksz_device *dev)
{
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	s64 now_ns, first_ns, period_ns, next_ns;
	struct ptp_perout_request request;
	struct timespec64 next;
	struct timespec64 now;
	unsigned int count;
	int ret;

	dev_info(dev->dev, "Restarting periodic output signal\n");

	ret = _ksz_ptp_gettime(dev, &now);
	if (ret)
		return ret;

	now_ns = timespec64_to_ns(&now);
	first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first);

	/* Calculate next perout event based on start time and period */
	period_ns = timespec64_to_ns(&ptp_data->perout_period);

	if (first_ns < now_ns) {
		/* Skip the whole periods that already elapsed */
		count = div_u64(now_ns - first_ns, period_ns);
		next_ns = first_ns + count * period_ns;
	} else {
		next_ns = first_ns;
	}

	/* Ensure 100 ms guard time prior next event */
	while (next_ns < now_ns + 100000000)
		next_ns += period_ns;

	/* Restart periodic output signal */
	next = ns_to_timespec64(next_ns);
	request.start.sec = next.tv_sec;
	request.start.nsec = next.tv_nsec;
	request.period.sec = ptp_data->perout_period.tv_sec;
	request.period.nsec = ptp_data->perout_period.tv_nsec;
	request.index = 0;
	request.flags = 0;

	return ksz_ptp_enable_perout(dev, &request, 1);
}
676
/* settime64 callback of the PTP clock: load the new time into the
 * hardware clock, re-arm a running periodic output and update the
 * tracked clock time.
 */
static int ksz_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	const u16 *regs = dev->info->regs;
	int ret;

	mutex_lock(&ptp_data->lock);

	/* Write to shadow registers and Load PTP clock */
	ret = ksz_write16(dev, regs[PTP_RTC_SUB_NANOSEC], PTP_RTC_0NS);
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, regs[PTP_RTC_NANOSEC], ts->tv_nsec);
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, regs[PTP_RTC_SEC], ts->tv_sec);
	if (ret)
		goto unlock;

	ret = ksz_rmw16(dev, regs[PTP_CLK_CTRL], PTP_LOAD_TIME, PTP_LOAD_TIME);
	if (ret)
		goto unlock;

	/* Stepping the clock invalidates a programmed trigger time */
	switch (ptp_data->tou_mode) {
	case KSZ_PTP_TOU_IDLE:
		break;

	case KSZ_PTP_TOU_PEROUT:
		ret = ksz_ptp_restart_perout(dev);
		if (ret)
			goto unlock;

		break;
	}

	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = *ts;
	spin_unlock_bh(&ptp_data->clock_lock);

unlock:
	mutex_unlock(&ptp_data->lock);

	return ret;
}
725
/* adjfine callback of the PTP clock: program the frequency offset as a
 * sub-nanosecond rate correction, or disable the correction entirely
 * when scaled_ppm is zero.
 */
static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	const u16 *regs = dev->info->regs;
	u64 base, adj;
	bool negative;
	u32 data32;
	int ret;

	mutex_lock(&ptp_data->lock);

	if (scaled_ppm) {
		/* base is the nominal 40 ns increment expressed in
		 * 2^-32 ns units; diff_by_scaled_ppm() converts
		 * scaled_ppm into the absolute per-tick adjustment.
		 */
		base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS;
		negative = diff_by_scaled_ppm(base, scaled_ppm, &adj);

		data32 = (u32)adj;
		data32 &= PTP_SUBNANOSEC_M;
		if (!negative)
			data32 |= PTP_RATE_DIR;

		ret = ksz_write32(dev, regs[PTP_SUBNANOSEC_RATE], data32);
		if (ret)
			goto unlock;

		ret = ksz_rmw16(dev, regs[PTP_CLK_CTRL], PTP_CLK_ADJ_ENABLE,
				PTP_CLK_ADJ_ENABLE);
		if (ret)
			goto unlock;
	} else {
		ret = ksz_rmw16(dev, regs[PTP_CLK_CTRL], PTP_CLK_ADJ_ENABLE, 0);
		if (ret)
			goto unlock;
	}

unlock:
	mutex_unlock(&ptp_data->lock);
	return ret;
}
765
/* adjtime callback of the PTP clock: step the hardware clock by @delta
 * nanoseconds, re-arm a running periodic output and update the tracked
 * clock time.
 */
static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	struct timespec64 delta64 = ns_to_timespec64(delta);
	const u16 *regs = dev->info->regs;
	s32 sec, nsec;
	u16 data16;
	int ret;

	mutex_lock(&ptp_data->lock);

	/* do not use ns_to_timespec64(),
	 * both sec and nsec are subtracted by hw
	 */
	sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);

	/* Magnitudes only; the direction is set via PTP_STEP_DIR below */
	ret = ksz_write32(dev, regs[PTP_RTC_NANOSEC], abs(nsec));
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, regs[PTP_RTC_SEC], abs(sec));
	if (ret)
		goto unlock;

	ret = ksz_read16(dev, regs[PTP_CLK_CTRL], &data16);
	if (ret)
		goto unlock;

	data16 |= PTP_STEP_ADJ;

	/* PTP_STEP_DIR -- 0: subtract, 1: add */
	if (delta < 0)
		data16 &= ~PTP_STEP_DIR;
	else
		data16 |= PTP_STEP_DIR;

	ret = ksz_write16(dev, regs[PTP_CLK_CTRL], data16);
	if (ret)
		goto unlock;

	/* Stepping the clock invalidates a programmed trigger time */
	switch (ptp_data->tou_mode) {
	case KSZ_PTP_TOU_IDLE:
		break;

	case KSZ_PTP_TOU_PEROUT:
		ret = ksz_ptp_restart_perout(dev);
		if (ret)
			goto unlock;

		break;
	}

	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64);
	spin_unlock_bh(&ptp_data->clock_lock);

unlock:
	mutex_unlock(&ptp_data->lock);
	return ret;
}
827
ksz_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * req,int on)828 static int ksz_ptp_enable(struct ptp_clock_info *ptp,
829 struct ptp_clock_request *req, int on)
830 {
831 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
832 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
833 int ret;
834
835 switch (req->type) {
836 case PTP_CLK_REQ_PEROUT:
837 mutex_lock(&ptp_data->lock);
838 ret = ksz_ptp_enable_perout(dev, &req->perout, on);
839 mutex_unlock(&ptp_data->lock);
840 break;
841 default:
842 return -EOPNOTSUPP;
843 }
844
845 return ret;
846 }
847
ksz_ptp_verify_pin(struct ptp_clock_info * ptp,unsigned int pin,enum ptp_pin_function func,unsigned int chan)848 static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
849 enum ptp_pin_function func, unsigned int chan)
850 {
851 int ret = 0;
852
853 switch (func) {
854 case PTP_PF_NONE:
855 case PTP_PF_PEROUT:
856 break;
857 default:
858 ret = -1;
859 break;
860 }
861
862 return ret;
863 }
864
/* do_aux_work callback of the PTP clock: periodically sample the
 * hardware clock into ptp_data->clock_time, which is used to extend
 * partial hardware time stamps (see ksz_tstamp_reconstruct()).
 */
static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	struct timespec64 ts;
	int ret;

	mutex_lock(&ptp_data->lock);
	ret = _ksz_ptp_gettime(dev, &ts);
	if (ret)
		goto out;

	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = ts;
	spin_unlock_bh(&ptp_data->clock_lock);

out:
	mutex_unlock(&ptp_data->lock);

	return HZ;  /* reschedule in 1 second */
}
887
ksz_ptp_start_clock(struct ksz_device * dev)888 static int ksz_ptp_start_clock(struct ksz_device *dev)
889 {
890 struct ksz_ptp_data *ptp_data = &dev->ptp_data;
891 const u16 *regs = dev->info->regs;
892 int ret;
893
894 ret = ksz_rmw16(dev, regs[PTP_CLK_CTRL], PTP_CLK_ENABLE, PTP_CLK_ENABLE);
895 if (ret)
896 return ret;
897
898 ptp_data->clock_time.tv_sec = 0;
899 ptp_data->clock_time.tv_nsec = 0;
900
901 return 0;
902 }
903
/* Set up and register the PTP clock for the switch: fill in the clock
 * capabilities and pin table, start the hardware clock and configure
 * P2P forwarding of PTP messages.
 *
 * Returns 0 on success or a negative error code.
 */
int ksz_ptp_clock_register(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	const u16 *regs = dev->info->regs;
	struct ksz_ptp_data *ptp_data;
	int ret;
	u8 i;

	ptp_data = &dev->ptp_data;
	mutex_init(&ptp_data->lock);
	spin_lock_init(&ptp_data->clock_lock);

	ptp_data->caps.owner = THIS_MODULE;
	snprintf(ptp_data->caps.name, 16, "Microchip Clock");
	ptp_data->caps.max_adj = KSZ_MAX_DRIFT_CORR;
	ptp_data->caps.gettime64 = ksz_ptp_gettime;
	ptp_data->caps.settime64 = ksz_ptp_settime;
	ptp_data->caps.adjfine = ksz_ptp_adjfine;
	ptp_data->caps.adjtime = ksz_ptp_adjtime;
	ptp_data->caps.do_aux_work = ksz_ptp_do_aux_work;
	ptp_data->caps.enable = ksz_ptp_enable;
	ptp_data->caps.verify = ksz_ptp_verify_pin;
	ptp_data->caps.n_pins = KSZ_PTP_N_GPIO;
	ptp_data->caps.n_per_out = 3;

	ret = ksz_ptp_start_clock(dev);
	if (ret)
		return ret;

	/* All pins start out unassigned */
	for (i = 0; i < KSZ_PTP_N_GPIO; i++) {
		struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i];

		snprintf(ptp_pin->name,
			 sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i);
		ptp_pin->index = i;
		ptp_pin->func = PTP_PF_NONE;
	}

	ptp_data->caps.pin_config = ptp_data->pin_config;

	/* Currently only P2P mode is supported. When 802_1AS bit is set, it
	 * forwards all PTP packets to host port and none to other ports.
	 */
	ret = ksz_rmw16(dev, regs[PTP_MSG_CONF1], PTP_TC_P2P | PTP_802_1AS,
			PTP_TC_P2P | PTP_802_1AS);
	if (ret)
		return ret;

	/* A NULL clock (PTP support disabled) is reported as success:
	 * PTR_ERR(NULL) is 0, and the NULL clock is checked at use
	 */
	ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev);
	if (IS_ERR_OR_NULL(ptp_data->clock))
		return PTR_ERR(ptp_data->clock);

	return 0;
}
958
ksz_ptp_clock_unregister(struct dsa_switch * ds)959 void ksz_ptp_clock_unregister(struct dsa_switch *ds)
960 {
961 struct ksz_device *dev = ds->priv;
962 struct ksz_ptp_data *ptp_data;
963
964 ptp_data = &dev->ptp_data;
965
966 if (ptp_data->clock)
967 ptp_clock_unregister(ptp_data->clock);
968 }
969
/* Read a 32 bit egress time stamp register of the given port. */
static int ksz_read_ts(struct ksz_port *port, u16 reg, u32 *ts)
{
	return ksz_read32(port->ksz_dev, reg, ts);
}
974
/* Threaded handler for one per-message time stamp interrupt: read the
 * raw egress time stamp, extend it to a full time stamp and wake the
 * waiter in ksz_ptp_txtstamp_skb().
 */
static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
{
	struct ksz_ptp_irq *ptpmsg_irq = dev_id;
	struct ksz_device *dev;
	struct ksz_port *port;
	u32 tstamp_raw;
	ktime_t tstamp;
	int ret;

	port = ptpmsg_irq->port;
	dev = port->ksz_dev;

	/* Only read the time stamp if it was enabled for this message
	 * type (see ksz_set_hwtstamp_config())
	 */
	if (ptpmsg_irq->ts_en) {
		ret = ksz_read_ts(port, ptpmsg_irq->ts_reg, &tstamp_raw);
		if (ret)
			return IRQ_NONE;

		tstamp = ksz_decode_tstamp(tstamp_raw);

		port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp);

		complete(&port->tstamp_msg_comp);
	}

	return IRQ_HANDLED;
}
1001
/* Threaded handler for the port's summary PTP TX interrupt:
 * acknowledge the pending status bits and dispatch the nested
 * per-message interrupts.
 */
static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
{
	struct ksz_irq *ptpirq = dev_id;
	unsigned int nhandled = 0;
	struct ksz_device *dev;
	unsigned int sub_irq;
	u16 data;
	int ret;
	u8 n;

	dev = ptpirq->dev;

	ret = ksz_read16(dev, ptpirq->reg_status, &data);
	if (ret)
		goto out;

	/* Clear the interrupts W1C */
	ret = ksz_write16(dev, ptpirq->reg_status, data);
	if (ret)
		return IRQ_NONE;

	/* Hand each pending bit to its nested per-message handler */
	for (n = 0; n < ptpirq->nirqs; ++n) {
		if (data & BIT(n + ptpirq->irq0_offset)) {
			sub_irq = irq_find_mapping(ptpirq->domain, n);
			handle_nested_irq(sub_irq);
			++nhandled;
		}
	}

out:
	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
1034
/* irq_chip mask callback: only update the cached mask; the register
 * write happens later in ksz_ptp_irq_bus_sync_unlock().
 */
static void ksz_ptp_irq_mask(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	kirq->masked &= ~BIT(d->hwirq + kirq->irq0_offset);
}
1041
/* irq_chip unmask callback: only update the cached mask; the register
 * write happens later in ksz_ptp_irq_bus_sync_unlock().
 */
static void ksz_ptp_irq_unmask(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	kirq->masked |= BIT(d->hwirq + kirq->irq0_offset);
}
1048
/* irq_chip bus_lock callback: serialize access to the interrupt enable
 * register, which lives behind a slow bus.
 */
static void ksz_ptp_irq_bus_lock(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	mutex_lock(&kirq->dev->lock_irq);
}
1055
/* irq_chip bus_sync_unlock callback: commit the cached mask to the
 * interrupt enable register and release the bus lock.
 */
static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
	struct ksz_device *dev = kirq->dev;
	int ret;

	ret = ksz_write16(dev, kirq->reg_mask, kirq->masked);
	if (ret)
		dev_err(dev->dev, "failed to change IRQ mask\n");

	mutex_unlock(&dev->lock_irq);
}
1068
/* irq_chip for the nested per-message PTP interrupts; mask changes are
 * cached and committed on bus_sync_unlock
 */
static const struct irq_chip ksz_ptp_irq_chip = {
	.name = "ksz-irq",
	.irq_mask = ksz_ptp_irq_mask,
	.irq_unmask = ksz_ptp_irq_unmask,
	.irq_bus_lock = ksz_ptp_irq_bus_lock,
	.irq_bus_sync_unlock = ksz_ptp_irq_bus_sync_unlock,
};
1076
/* irq_domain map callback: attach a nested per-message interrupt to
 * the ksz_ptp_irq_chip.
 */
static int ksz_ptp_irq_domain_map(struct irq_domain *d,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, d->host_data);
	irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq);
	irq_set_noprobe(irq);

	return 0;
}
1086
/* Domain operations for the nested per-message PTP interrupt domain */
static const struct irq_domain_ops ksz_ptp_irq_domain_ops = {
	.map = ksz_ptp_irq_domain_map,
	.xlate = irq_domain_xlate_twocell,
};
1091
/* Release the handler and domain mapping of one per-message PTP
 * interrupt of a port.
 */
static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n)
{
	struct ksz_ptp_irq *msg_irq = &port->ptpmsg_irq[n];

	free_irq(msg_irq->num, msg_irq);
	irq_dispose_mapping(msg_irq->num);
}
1101
/* Create the domain mapping and request the threaded handler for one
 * per-message (pdresp/xdreq/sync) egress time stamp interrupt of a
 * port.
 *
 * Returns 0 on success or a negative error code; on failure the IRQ
 * mapping is disposed again.
 */
static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
{
	/* Both lookup tables are constant; keep them off the stack like
	 * the name table already was.
	 */
	static const u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS,
				     REG_PTP_PORT_XDELAY_TS,
				     REG_PTP_PORT_SYNC_TS};
	static const char * const name[] = {"pdresp-msg", "xdreq-msg",
					    "sync-msg"};
	const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
	struct ksz_irq *ptpirq = &port->ptpirq;
	struct ksz_ptp_irq *ptpmsg_irq;
	int ret;

	ptpmsg_irq = &port->ptpmsg_irq[n];
	ptpmsg_irq->num = irq_create_mapping(ptpirq->domain, n);
	if (!ptpmsg_irq->num)
		return -EINVAL;

	ptpmsg_irq->port = port;
	ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);

	strscpy(ptpmsg_irq->name, name[n]);

	ret = request_threaded_irq(ptpmsg_irq->num, NULL,
				   ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
				   ptpmsg_irq->name, ptpmsg_irq);
	if (ret)
		irq_dispose_mapping(ptpmsg_irq->num);

	return ret;
}
1131
/* Set up the PTP interrupt infrastructure of one port: a linear IRQ
 * domain for the three per-message interrupts, nested under the port's
 * summary PTP interrupt.
 *
 * Returns 0 on success or a negative error code; on failure all
 * partially acquired resources are released again.
 */
int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
{
	struct ksz_device *dev = ds->priv;
	const struct ksz_dev_ops *ops = dev->dev_ops;
	struct ksz_port *port = &dev->ports[p];
	struct ksz_irq *ptpirq = &port->ptpirq;
	int irq;
	int ret;

	ptpirq->dev = dev;
	ptpirq->masked = 0;
	ptpirq->nirqs = 3;
	ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
	ptpirq->reg_status = ops->get_port_addr(p,
						REG_PTP_PORT_TX_INT_STATUS__2);
	ptpirq->irq0_offset = KSZ_PTP_INT_START;

	snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);

	init_completion(&port->tstamp_msg_comp);

	ptpirq->domain = irq_domain_create_linear(dev_fwnode(dev->dev), ptpirq->nirqs,
						  &ksz_ptp_irq_domain_ops, ptpirq);
	if (!ptpirq->domain)
		return -ENOMEM;

	/* The summary PTP interrupt is itself nested in the port IRQ domain */
	ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
	if (!ptpirq->irq_num) {
		ret = -EINVAL;
		goto out;
	}

	ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn,
				   IRQF_ONESHOT, ptpirq->name, ptpirq);
	if (ret)
		goto out;

	for (irq = 0; irq < ptpirq->nirqs; irq++) {
		ret = ksz_ptp_msg_irq_setup(port, irq);
		if (ret)
			goto out_ptp_msg;
	}

	return 0;

out_ptp_msg:
	/* Undo the message IRQs that were set up before the failure */
	free_irq(ptpirq->irq_num, ptpirq);
	while (irq--) {
		free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
		irq_dispose_mapping(port->ptpmsg_irq[irq].num);
	}
out:
	irq_domain_remove(ptpirq->domain);

	return ret;
}
1188
/* Tear down the PTP interrupt infrastructure of one port, undoing
 * ksz_ptp_irq_setup().
 */
void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *port = &dev->ports[p];
	struct ksz_irq *ptpirq = &port->ptpirq;
	u8 n;

	for (n = 0; n < ptpirq->nirqs; n++)
		ksz_ptp_msg_irq_free(port, n);

	free_irq(ptpirq->irq_num, ptpirq);
	irq_dispose_mapping(ptpirq->irq_num);

	irq_domain_remove(ptpirq->domain);
}
1204
1205 MODULE_AUTHOR("Christian Eggers <ceggers@arri.de>");
1206 MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
1207 MODULE_DESCRIPTION("PTP support for KSZ switch");
1208 MODULE_LICENSE("GPL");
1209