// SPDX-License-Identifier: GPL-2.0
/* Microchip KSZ PTP Implementation
 *
 * Copyright (C) 2020 ARRI Lighting
 * Copyright (C) 2022 Microchip Technology Inc.
 */

#include <linux/dsa/ksz_common.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>

#include "ksz_common.h"
#include "ksz_ptp.h"
#include "ksz_ptp_reg.h"

#define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps)
#define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data)
#define work_to_xmit_work(w) \
		container_of((w), struct ksz_deferred_xmit_work, work)

/* Sub-nanoseconds-adj,max * sub-nanoseconds / 40ns * 1ns
 * = (2^30-1) * (2 ^ -32) / 40 ns * 1 ns = 6249999
 */
#define KSZ_MAX_DRIFT_CORR 6249999
#define KSZ_MAX_PULSE_WIDTH 125000000LL

#define KSZ_PTP_INC_NS 40ULL  /* HW clock is incremented every 40 ns (by 40) */
#define KSZ_PTP_SUBNS_BITS 32

#define KSZ_PTP_INT_START 13

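/* On LAN937x the trigger output unit shares its pins with two of the switch
 * LEDs, so override the LED function and select the PTP GPIOs as the pin
 * source.
 */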
static int ksz_ptp_tou_gpio(struct ksz_device *dev)
{
	int ret;

	if (!is_lan937x(dev))
		return 0;

	ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT,
			GPIO_OUT);
	if (ret)
		return ret;

	ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2,
			LED_OVR_1 | LED_OVR_2);
	if (ret)
		return ret;

	return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4,
			 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2,
			 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2);
}

static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit)
{
	u32 data;
	int ret;

	/* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */
	ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET);
	if (ret)
		return ret;

	data = FIELD_PREP(TRIG_DONE_M, BIT(unit));
	ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data);
	if (ret)
		return ret;

	data = FIELD_PREP(TRIG_INT_M, BIT(unit));
	ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data);
	if (ret)
		return ret;

	/* Clear reset and set GPIO direction */
	return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE),
			 0);
}

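/* Check that the requested pulse width is suitably aligned and fits into the
 * hardware pulse width field, which is programmed in 8 ns units.
 */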
static int ksz_ptp_tou_pulse_verify(u64 pulse_ns)
{
	u32 data;

	if (pulse_ns & 0x3)
		return -EINVAL;

	data = (pulse_ns / 8);
	if (!FIELD_FIT(TRIG_PULSE_WIDTH_M, data))
		return -ERANGE;

	return 0;
}

static int ksz_ptp_tou_target_time_set(struct ksz_device *dev,
				       struct timespec64 const *ts)
{
	int ret;

	/* Hardware has only 32 bits for the seconds field */
	if ((ts->tv_sec & 0xffffffff) != ts->tv_sec)
		return -EINVAL;

	ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec);
	if (ret)
		return ret;

	ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec);
	if (ret)
		return ret;

	return 0;
}

static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit)
{
	u32 data;
	int ret;

	ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE);
	if (ret)
		return ret;

	/* Check error flag:
	 * - the ACTIVE flag is NOT cleared on error!
	 */
	ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data);
	if (ret)
		return ret;

	if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) {
		dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__,
			unit);
		ret = -EIO;
		/* Unit will be reset on next access */
		return ret;
	}

	return 0;
}

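/* Program one trigger output unit for a periodic positive pulse pattern:
 * cycle width, infinite cycle count, pulse width (in 8 ns units) and the
 * absolute target time of the first edge.
 */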
static int ksz_ptp_configure_perout(struct ksz_device *dev,
				    u32 cycle_width_ns, u32 pulse_width_ns,
				    struct timespec64 const *target_time,
				    u8 index)
{
	u32 data;
	int ret;

	data = FIELD_PREP(TRIG_NOTIFY, 1) |
		FIELD_PREP(TRIG_GPO_M, index) |
		FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD);
	ret = ksz_write32(dev, REG_TRIG_CTRL__4, data);
	if (ret)
		return ret;

	ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns);
	if (ret)
		return ret;

	/* Set cycle count 0 - Infinite */
	ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0);
	if (ret)
		return ret;

	data = (pulse_width_ns / 8);
	ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_target_time_set(dev, target_time);
	if (ret)
		return ret;

	return 0;
}

static int ksz_ptp_enable_perout(struct ksz_device *dev,
				 struct ptp_perout_request const *request,
				 int on)
{
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	u64 req_pulse_width_ns;
	u64 cycle_width_ns;
	u64 pulse_width_ns;
	int pin = 0;
	u32 data32;
	int ret;

	if (request->flags & ~PTP_PEROUT_DUTY_CYCLE)
		return -EOPNOTSUPP;

	if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT &&
	    ptp_data->tou_mode != KSZ_PTP_TOU_IDLE)
		return -EBUSY;

	pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index);
	if (pin < 0)
		return -EINVAL;

	data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) |
		 FIELD_PREP(PTP_TOU_INDEX, request->index);
	ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4,
			PTP_GPIO_INDEX | PTP_TOU_INDEX, data32);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_reset(dev, request->index);
	if (ret)
		return ret;

	if (!on) {
		ptp_data->tou_mode = KSZ_PTP_TOU_IDLE;
		return 0;
	}

	ptp_data->perout_target_time_first.tv_sec  = request->start.sec;
	ptp_data->perout_target_time_first.tv_nsec = request->start.nsec;

	ptp_data->perout_period.tv_sec = request->period.sec;
	ptp_data->perout_period.tv_nsec = request->period.nsec;

	cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period);
	if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns)
		return -EINVAL;

	if (request->flags & PTP_PEROUT_DUTY_CYCLE) {
		pulse_width_ns = request->on.sec * NSEC_PER_SEC +
			request->on.nsec;
	} else {
		/* Use a duty cycle of 50%. Maximum pulse width supported by the
		 * hardware is a little bit more than 125 ms.
		 */
		req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC +
				      request->period.nsec) / 2;
		pulse_width_ns = min_t(u64, req_pulse_width_ns,
				       KSZ_MAX_PULSE_WIDTH);
	}

	ret = ksz_ptp_tou_pulse_verify(pulse_width_ns);
	if (ret)
		return ret;

	ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns,
				       &ptp_data->perout_target_time_first,
				       pin);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_gpio(dev);
	if (ret)
		return ret;

	ret = ksz_ptp_tou_start(dev, request->index);
	if (ret)
		return ret;

	ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT;

	return 0;
}

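/* Enable PTP message handling in the switch whenever hardware timestamping is
 * requested on at least one user port.  The aux worker that keeps the software
 * copy of the clock time up to date and the tagger's timestamp handling are
 * switched on and off together with it.
 */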
static int ksz_ptp_enable_mode(struct ksz_device *dev)
{
	struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	struct ksz_port *prt;
	struct dsa_port *dp;
	bool tag_en = false;

	dsa_switch_for_each_user_port(dp, dev->ds) {
		prt = &dev->ports[dp->index];
		if (prt->hwts_tx_en || prt->hwts_rx_en) {
			tag_en = true;
			break;
		}
	}

	if (tag_en) {
		ptp_schedule_worker(ptp_data->clock, 0);
	} else {
		ptp_cancel_worker_sync(ptp_data->clock);
	}

	tagger_data->hwtstamp_set_state(dev->ds, tag_en);

	return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE,
			 tag_en ? PTP_ENABLE : 0);
}

/* Report the timestamping capabilities of the switch, as queried through the
 * ethtool -T <interface> utility.
 */
int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_info *ts)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_ptp_data *ptp_data;

	ptp_data = &dev->ptp_data;

	if (!ptp_data->clock)
		return -ENODEV;

	ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
			      SOF_TIMESTAMPING_RX_HARDWARE |
			      SOF_TIMESTAMPING_RAW_HARDWARE;

	ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P);

	if (is_lan937x(dev))
		ts->tx_types |= BIT(HWTSTAMP_TX_ON);

	ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	ts->phc_index = ptp_clock_index(ptp_data->clock);

	return 0;
}

int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
	struct ksz_device *dev = ds->priv;
	struct hwtstamp_config *config;
	struct ksz_port *prt;

	prt = &dev->ports[port];
	config = &prt->tstamp_config;

	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
		-EFAULT : 0;
}

static int ksz_set_hwtstamp_config(struct ksz_device *dev,
				   struct ksz_port *prt,
				   struct hwtstamp_config *config)
{
	int ret;

	if (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en  = false;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
		prt->hwts_tx_en = false;
		break;
	case HWTSTAMP_TX_ONESTEP_P2P:
		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en  = false;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
		prt->hwts_tx_en = true;

		ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP);
		if (ret)
			return ret;

		break;
	case HWTSTAMP_TX_ON:
		if (!is_lan937x(dev))
			return -ERANGE;

		prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en  = true;
		prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
		prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
		prt->hwts_tx_en = true;

		ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0);
		if (ret)
			return ret;

		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		prt->hwts_rx_en = false;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		prt->hwts_rx_en = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		prt->hwts_rx_en = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		prt->hwts_rx_en = true;
		break;
	default:
		config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	return ksz_ptp_enable_mode(dev);
}

int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
	struct ksz_device *dev = ds->priv;
	struct hwtstamp_config config;
	struct ksz_port *prt;
	int ret;

	prt = &dev->ports[port];

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	ret = ksz_set_hwtstamp_config(dev, prt, &config);
	if (ret)
		return ret;

	memcpy(&prt->tstamp_config, &config, sizeof(config));

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

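/* The hardware delivers only a partial timestamp (2 bits of seconds plus
 * nanoseconds).  Combine it with the cached clock time and pick the seconds
 * value closest to the current time.
 */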
static ktime_t ksz_tstamp_reconstruct(struct ksz_device *dev, ktime_t tstamp)
{
	struct timespec64 ptp_clock_time;
	struct ksz_ptp_data *ptp_data;
	struct timespec64 diff;
	struct timespec64 ts;

	ptp_data = &dev->ptp_data;
	ts = ktime_to_timespec64(tstamp);

	spin_lock_bh(&ptp_data->clock_lock);
	ptp_clock_time = ptp_data->clock_time;
	spin_unlock_bh(&ptp_data->clock_lock);

	/* calculate full time from partial time stamp */
	ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec;

	/* find nearest possible point in time */
	diff = timespec64_sub(ts, ptp_clock_time);
	if (diff.tv_sec > 2)
		ts.tv_sec -= 4;
	else if (diff.tv_sec < -2)
		ts.tv_sec += 4;

	return timespec64_to_ktime(ts);
}

bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
		       unsigned int type)
{
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
	struct ksz_device *dev = ds->priv;
	struct ptp_header *ptp_hdr;
	struct ksz_port *prt;
	u8 ptp_msg_type;
	ktime_t tstamp;
	s64 correction;

	prt = &dev->ports[port];

	tstamp = KSZ_SKB_CB(skb)->tstamp;
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp);

	if (prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P)
		goto out;

	ptp_hdr = ptp_parse_header(skb, type);
	if (!ptp_hdr)
		goto out;

	ptp_msg_type = ptp_get_msgtype(ptp_hdr, type);
	if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ)
		goto out;

	/* Only subtract the partial time stamp from the correction field.  When
	 * the hardware adds the egress time stamp to the correction field of
	 * the PDelay_Resp message on tx, it will likewise add only the partial
	 * time stamp.
	 */
	correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
	correction -= ktime_to_ns(tstamp) << 16;

	ptp_header_update_correction(skb, type, ptp_hdr, correction);

out:
	return false;
}

void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
	struct ksz_device *dev = ds->priv;
	struct ptp_header *hdr;
	struct sk_buff *clone;
	struct ksz_port *prt;
	unsigned int type;
	u8 ptp_msg_type;

	prt = &dev->ports[port];

	if (!prt->hwts_tx_en)
		return;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return;

	ptp_msg_type = ptp_get_msgtype(hdr, type);

	switch (ptp_msg_type) {
	case PTP_MSGTYPE_SYNC:
		if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P)
			return;
		break;
	case PTP_MSGTYPE_PDELAY_REQ:
		break;
	case PTP_MSGTYPE_PDELAY_RESP:
		if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) {
			KSZ_SKB_CB(skb)->ptp_type = type;
			KSZ_SKB_CB(skb)->update_correction = true;
			return;
		}
		break;

	default:
		return;
	}

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	/* Cache the clone so it can be used in tag_ksz.c */
	KSZ_SKB_CB(skb)->clone = clone;
}

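/* Wait for the egress timestamp reported by the PTP message interrupt and
 * deliver it to the socket error queue via the cloned skb.
 */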
static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
				 struct ksz_port *prt, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps hwtstamps = {};
	int ret;

	/* The timeout must cover the time for the DSA conduit to transmit the
	 * frame, the timestamp latency, the IRQ latency and the time needed to
	 * read the time stamp.
	 */
	ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
					  msecs_to_jiffies(100));
	if (!ret)
		return;

	hwtstamps.hwtstamp = prt->tstamp_msg;
	skb_complete_tx_timestamp(skb, &hwtstamps);
}

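/* Deferred xmit worker: transmit the PTP frame and then wait for its hardware
 * egress timestamp to complete the cloned skb.
 */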
void ksz_port_deferred_xmit(struct kthread_work *work)
{
	struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
	struct sk_buff *clone, *skb = xmit_work->skb;
	struct dsa_switch *ds = xmit_work->dp->ds;
	struct ksz_device *dev = ds->priv;
	struct ksz_port *prt;

	prt = &dev->ports[xmit_work->dp->index];

	clone = KSZ_SKB_CB(skb)->clone;

	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;

	reinit_completion(&prt->tstamp_msg_comp);

	dsa_enqueue_skb(skb, skb->dev);

	ksz_ptp_txtstamp_skb(dev, prt, clone);

	kfree(xmit_work);
}

static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
{
	u32 nanoseconds;
	u32 seconds;
	u8 phase;
	int ret;

	/* Copy current PTP clock into shadow registers and read */
	ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME);
	if (ret)
		return ret;

	ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase);
	if (ret)
		return ret;

	ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds);
	if (ret)
		return ret;

	ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds);
	if (ret)
		return ret;

	ts->tv_sec = seconds;
	ts->tv_nsec = nanoseconds + phase * 8;

	return 0;
}

static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	int ret;

	mutex_lock(&ptp_data->lock);
	ret = _ksz_ptp_gettime(dev, ts);
	mutex_unlock(&ptp_data->lock);

	return ret;
}

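/* The programmed target time of a periodic output is absolute, so the signal
 * has to be re-armed after the clock has been stepped.  Keep the original
 * phase and leave at least 100 ms until the next event.
 */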
static int ksz_ptp_restart_perout(struct ksz_device *dev)
{
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	s64 now_ns, first_ns, period_ns, next_ns;
	struct ptp_perout_request request;
	struct timespec64 next;
	struct timespec64 now;
	unsigned int count;
	int ret;

	dev_info(dev->dev, "Restarting periodic output signal\n");

	ret = _ksz_ptp_gettime(dev, &now);
	if (ret)
		return ret;

	now_ns = timespec64_to_ns(&now);
	first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first);

	/* Calculate next perout event based on start time and period */
	period_ns = timespec64_to_ns(&ptp_data->perout_period);

	if (first_ns < now_ns) {
		count = div_u64(now_ns - first_ns, period_ns);
		next_ns = first_ns + count * period_ns;
	} else {
		next_ns = first_ns;
	}

	/* Ensure a 100 ms guard time prior to the next event */
	while (next_ns < now_ns + 100000000)
		next_ns += period_ns;

	/* Restart periodic output signal */
	next = ns_to_timespec64(next_ns);
	request.start.sec  = next.tv_sec;
	request.start.nsec = next.tv_nsec;
	request.period.sec  = ptp_data->perout_period.tv_sec;
	request.period.nsec = ptp_data->perout_period.tv_nsec;
	request.index = 0;
	request.flags = 0;

	return ksz_ptp_enable_perout(dev, &request, 1);
}

static int ksz_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	int ret;

	mutex_lock(&ptp_data->lock);

	/* Write to shadow registers and Load PTP clock */
	ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS);
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec);
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec);
	if (ret)
		goto unlock;

	ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME);
	if (ret)
		goto unlock;

	switch (ptp_data->tou_mode) {
	case KSZ_PTP_TOU_IDLE:
		break;

	case KSZ_PTP_TOU_PEROUT:
		ret = ksz_ptp_restart_perout(dev);
		if (ret)
			goto unlock;

		break;
	}

	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = *ts;
	spin_unlock_bh(&ptp_data->clock_lock);

unlock:
	mutex_unlock(&ptp_data->lock);

	return ret;
}

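/* Fine rate adjustment: program the sub-nanosecond correction that is applied
 * on every 40 ns clock increment, with PTP_RATE_DIR selecting the direction.
 */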
static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	u64 base, adj;
	bool negative;
	u32 data32;
	int ret;

	mutex_lock(&ptp_data->lock);

	if (scaled_ppm) {
		base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS;
		negative = diff_by_scaled_ppm(base, scaled_ppm, &adj);

		data32 = (u32)adj;
		data32 &= PTP_SUBNANOSEC_M;
		if (!negative)
			data32 |= PTP_RATE_DIR;

		ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32);
		if (ret)
			goto unlock;

		ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE,
				PTP_CLK_ADJ_ENABLE);
		if (ret)
			goto unlock;
	} else {
		ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0);
		if (ret)
			goto unlock;
	}

unlock:
	mutex_unlock(&ptp_data->lock);
	return ret;
}

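/* Step the clock: write the absolute seconds and nanoseconds offset and let
 * PTP_STEP_DIR select whether it is added to or subtracted from the clock.
 */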
static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	struct timespec64 delta64 = ns_to_timespec64(delta);
	s32 sec, nsec;
	u16 data16;
	int ret;

	mutex_lock(&ptp_data->lock);

	/* Do not use ns_to_timespec64() here: the hardware adds or subtracts
	 * both the sec and nsec values in the same direction, so split the
	 * delta with div_s64_rem() and write the absolute values instead.
	 */
	sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);

	ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec));
	if (ret)
		goto unlock;

	ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec));
	if (ret)
		goto unlock;

	ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16);
	if (ret)
		goto unlock;

	data16 |= PTP_STEP_ADJ;

	/* PTP_STEP_DIR -- 0: subtract, 1: add */
	if (delta < 0)
		data16 &= ~PTP_STEP_DIR;
	else
		data16 |= PTP_STEP_DIR;

	ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16);
	if (ret)
		goto unlock;

	switch (ptp_data->tou_mode) {
	case KSZ_PTP_TOU_IDLE:
		break;

	case KSZ_PTP_TOU_PEROUT:
		ret = ksz_ptp_restart_perout(dev);
		if (ret)
			goto unlock;

		break;
	}

	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64);
	spin_unlock_bh(&ptp_data->clock_lock);

unlock:
	mutex_unlock(&ptp_data->lock);
	return ret;
}

static int ksz_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *req, int on)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	int ret;

	switch (req->type) {
	case PTP_CLK_REQ_PEROUT:
		mutex_lock(&ptp_data->lock);
		ret = ksz_ptp_enable_perout(dev, &req->perout, on);
		mutex_unlock(&ptp_data->lock);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
			      enum ptp_pin_function func, unsigned int chan)
{
	int ret = 0;

	switch (func) {
	case PTP_PF_NONE:
	case PTP_PF_PEROUT:
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

/* Periodic work assigned to the do_aux_work member of the ptp_clock
 * capabilities: keep the software copy of the clock time up to date.
 */
static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp)
{
	struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
	struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
	struct timespec64 ts;
	int ret;

	mutex_lock(&ptp_data->lock);
	ret = _ksz_ptp_gettime(dev, &ts);
	if (ret)
		goto out;

	spin_lock_bh(&ptp_data->clock_lock);
	ptp_data->clock_time = ts;
	spin_unlock_bh(&ptp_data->clock_lock);

out:
	mutex_unlock(&ptp_data->lock);

	return HZ;  /* reschedule in 1 second */
}

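/* Enable the PTP hardware clock and reset the software copy of its time. */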
static int ksz_ptp_start_clock(struct ksz_device *dev)
{
	struct ksz_ptp_data *ptp_data = &dev->ptp_data;
	int ret;

	ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE);
	if (ret)
		return ret;

	ptp_data->clock_time.tv_sec = 0;
	ptp_data->clock_time.tv_nsec = 0;

	return 0;
}

int ksz_ptp_clock_register(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_ptp_data *ptp_data;
	int ret;
	u8 i;

	ptp_data = &dev->ptp_data;
	mutex_init(&ptp_data->lock);
	spin_lock_init(&ptp_data->clock_lock);

	ptp_data->caps.owner		= THIS_MODULE;
	snprintf(ptp_data->caps.name, 16, "Microchip Clock");
	ptp_data->caps.max_adj		= KSZ_MAX_DRIFT_CORR;
	ptp_data->caps.gettime64	= ksz_ptp_gettime;
	ptp_data->caps.settime64	= ksz_ptp_settime;
	ptp_data->caps.adjfine		= ksz_ptp_adjfine;
	ptp_data->caps.adjtime		= ksz_ptp_adjtime;
	ptp_data->caps.do_aux_work	= ksz_ptp_do_aux_work;
	ptp_data->caps.enable		= ksz_ptp_enable;
	ptp_data->caps.verify		= ksz_ptp_verify_pin;
	ptp_data->caps.n_pins		= KSZ_PTP_N_GPIO;
	ptp_data->caps.n_per_out	= 3;

	ret = ksz_ptp_start_clock(dev);
	if (ret)
		return ret;

	for (i = 0; i < KSZ_PTP_N_GPIO; i++) {
		struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i];

		snprintf(ptp_pin->name,
			 sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i);
		ptp_pin->index = i;
		ptp_pin->func = PTP_PF_NONE;
	}

	ptp_data->caps.pin_config = ptp_data->pin_config;

	/* Currently only P2P mode is supported.  When the 802_1AS bit is set,
	 * the switch forwards all PTP packets to the host port and none to the
	 * other ports.
	 */
	ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS,
			PTP_TC_P2P | PTP_802_1AS);
	if (ret)
		return ret;

	ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev);
	if (IS_ERR_OR_NULL(ptp_data->clock))
		return PTR_ERR(ptp_data->clock);

	return 0;
}

void ksz_ptp_clock_unregister(struct dsa_switch *ds)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_ptp_data *ptp_data;

	ptp_data = &dev->ptp_data;

	if (ptp_data->clock)
		ptp_clock_unregister(ptp_data->clock);
}

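/* Threaded handler for the per-message timestamp interrupts: read the raw
 * egress timestamp, reconstruct the full time and wake up the waiter in
 * ksz_ptp_txtstamp_skb().
 */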
static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
{
	struct ksz_ptp_irq *ptpmsg_irq = dev_id;
	struct ksz_device *dev;
	struct ksz_port *port;
	u32 tstamp_raw;
	ktime_t tstamp;
	int ret;

	port = ptpmsg_irq->port;
	dev = port->ksz_dev;

	if (ptpmsg_irq->ts_en) {
		ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw);
		if (ret)
			return IRQ_NONE;

		tstamp = ksz_decode_tstamp(tstamp_raw);

		port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp);

		complete(&port->tstamp_msg_comp);
	}

	return IRQ_HANDLED;
}

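/* Demultiplex the per-port PTP interrupt: acknowledge the write-1-to-clear
 * status bits and run the nested handler of every message interrupt that
 * fired.
 */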
static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
{
	struct ksz_irq *ptpirq = dev_id;
	unsigned int nhandled = 0;
	struct ksz_device *dev;
	unsigned int sub_irq;
	u16 data;
	int ret;
	u8 n;

	dev = ptpirq->dev;

	ret = ksz_read16(dev, ptpirq->reg_status, &data);
	if (ret)
		goto out;

	/* Clear the interrupts W1C */
	ret = ksz_write16(dev, ptpirq->reg_status, data);
	if (ret)
		return IRQ_NONE;

	for (n = 0; n < ptpirq->nirqs; ++n) {
		if (data & BIT(n + KSZ_PTP_INT_START)) {
			sub_irq = irq_find_mapping(ptpirq->domain, n);
			handle_nested_irq(sub_irq);
			++nhandled;
		}
	}

out:
	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}

static void ksz_ptp_irq_mask(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START);
}

static void ksz_ptp_irq_unmask(struct irq_data *d)
{
	struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);

	kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START);
}

static void ksz_ptp_irq_bus_lock(struct irq_data *d)
{
	struct ksz_irq *kirq  = irq_data_get_irq_chip_data(d);

	mutex_lock(&kirq->dev->lock_irq);
}

static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d)
{
	struct ksz_irq *kirq  = irq_data_get_irq_chip_data(d);
	struct ksz_device *dev = kirq->dev;
	int ret;

	ret = ksz_write16(dev, kirq->reg_mask, kirq->masked);
	if (ret)
		dev_err(dev->dev, "failed to change IRQ mask\n");

	mutex_unlock(&dev->lock_irq);
}

static const struct irq_chip ksz_ptp_irq_chip = {
	.name			= "ksz-irq",
	.irq_mask		= ksz_ptp_irq_mask,
	.irq_unmask		= ksz_ptp_irq_unmask,
	.irq_bus_lock		= ksz_ptp_irq_bus_lock,
	.irq_bus_sync_unlock	= ksz_ptp_irq_bus_sync_unlock,
};

static int ksz_ptp_irq_domain_map(struct irq_domain *d,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, d->host_data);
	irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq);
	irq_set_noprobe(irq);

	return 0;
}

static const struct irq_domain_ops ksz_ptp_irq_domain_ops = {
	.map	= ksz_ptp_irq_domain_map,
	.xlate	= irq_domain_xlate_twocell,
};

static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n)
{
	struct ksz_ptp_irq *ptpmsg_irq;

	ptpmsg_irq = &port->ptpmsg_irq[n];

	free_irq(ptpmsg_irq->num, ptpmsg_irq);
	irq_dispose_mapping(ptpmsg_irq->num);
}

static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
{
	u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS, REG_PTP_PORT_XDELAY_TS,
			REG_PTP_PORT_SYNC_TS};
	static const char * const name[] = {"pdresp-msg", "xdreq-msg",
					    "sync-msg"};
	const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
	struct ksz_ptp_irq *ptpmsg_irq;

	ptpmsg_irq = &port->ptpmsg_irq[n];

	ptpmsg_irq->port = port;
	ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);

	snprintf(ptpmsg_irq->name, sizeof(ptpmsg_irq->name), name[n]);

	ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
	if (ptpmsg_irq->num < 0)
		return ptpmsg_irq->num;

	return request_threaded_irq(ptpmsg_irq->num, NULL,
				    ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
				    ptpmsg_irq->name, ptpmsg_irq);
}

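/* Create the per-port interrupt domain for the three PTP message timestamp
 * interrupts and hook them up below the port's PTP source interrupt.
 */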
int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
{
	struct ksz_device *dev = ds->priv;
	const struct ksz_dev_ops *ops = dev->dev_ops;
	struct ksz_port *port = &dev->ports[p];
	struct ksz_irq *ptpirq = &port->ptpirq;
	int irq;
	int ret;

	ptpirq->dev = dev;
	ptpirq->masked = 0;
	ptpirq->nirqs = 3;
	ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
	ptpirq->reg_status = ops->get_port_addr(p,
						REG_PTP_PORT_TX_INT_STATUS__2);
	snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);

	init_completion(&port->tstamp_msg_comp);

	ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
					       &ksz_ptp_irq_domain_ops, ptpirq);
	if (!ptpirq->domain)
		return -ENOMEM;

	for (irq = 0; irq < ptpirq->nirqs; irq++)
		irq_create_mapping(ptpirq->domain, irq);

	ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
	if (ptpirq->irq_num < 0) {
		ret = ptpirq->irq_num;
		goto out;
	}

	ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn,
				   IRQF_ONESHOT, ptpirq->name, ptpirq);
	if (ret)
		goto out;

	for (irq = 0; irq < ptpirq->nirqs; irq++) {
		ret = ksz_ptp_msg_irq_setup(port, irq);
		if (ret)
			goto out_ptp_msg;
	}

	return 0;

out_ptp_msg:
	free_irq(ptpirq->irq_num, ptpirq);
	while (irq--)
		free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
out:
	for (irq = 0; irq < ptpirq->nirqs; irq++)
		irq_dispose_mapping(port->ptpmsg_irq[irq].num);

	irq_domain_remove(ptpirq->domain);

	return ret;
}

void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *port = &dev->ports[p];
	struct ksz_irq *ptpirq = &port->ptpirq;
	u8 n;

	for (n = 0; n < ptpirq->nirqs; n++)
		ksz_ptp_msg_irq_free(port, n);

	free_irq(ptpirq->irq_num, ptpirq);
	irq_dispose_mapping(ptpirq->irq_num);

	irq_domain_remove(ptpirq->domain);
}

MODULE_AUTHOR("Christian Eggers <ceggers@arri.de>");
MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
MODULE_DESCRIPTION("PTP support for KSZ switch");
MODULE_LICENSE("GPL");