xref: /linux/drivers/ptp/ptp_chardev.c (revision ccde82e909467abdf098a8ee6f63e1ecf9a47ce5)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * PTP 1588 clock support - character device implementation.
4  *
5  * Copyright (C) 2010 OMICRON electronics GmbH
6  */
7 #include <linux/compat.h>
8 #include <linux/module.h>
9 #include <linux/posix-clock.h>
10 #include <linux/poll.h>
11 #include <linux/sched.h>
12 #include <linux/slab.h>
13 #include <linux/timekeeping.h>
14 #include <linux/debugfs.h>
15 
16 #include <linux/nospec.h>
17 
18 #include "ptp_private.h"
19 
20 static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
21 			       enum ptp_pin_function func, unsigned int chan)
22 {
23 	struct ptp_clock_request rq;
24 	int err = 0;
25 
26 	memset(&rq, 0, sizeof(rq));
27 
28 	switch (func) {
29 	case PTP_PF_NONE:
30 		break;
31 	case PTP_PF_EXTTS:
32 		rq.type = PTP_CLK_REQ_EXTTS;
33 		rq.extts.index = chan;
34 		err = ops->enable(ops, &rq, 0);
35 		break;
36 	case PTP_PF_PEROUT:
37 		rq.type = PTP_CLK_REQ_PEROUT;
38 		rq.perout.index = chan;
39 		err = ops->enable(ops, &rq, 0);
40 		break;
41 	case PTP_PF_PHYSYNC:
42 		break;
43 	default:
44 		return -EINVAL;
45 	}
46 
47 	return err;
48 }
49 
/*
 * ptp_disable_all_events - quiesce every event source on a clock
 * @ptp: clock whose event generation should be stopped
 *
 * Disables every pin currently configured for EXTTS and, if the driver
 * advertises PPS support, the PPS event as well.
 */
void ptp_disable_all_events(struct ptp_clock *ptp)
{
	struct ptp_clock_info *info = ptp->info;
	unsigned int i;

	/* pincfg_mux serializes against concurrent pin reconfiguration. */
	mutex_lock(&ptp->pincfg_mux);
	/* Disable any pins that may raise EXTTS events */
	for (i = 0; i < info->n_pins; i++)
		if (info->pin_config[i].func == PTP_PF_EXTTS)
			ptp_disable_pinfunc(info, info->pin_config[i].func,
					    info->pin_config[i].chan);

	/* Disable the PPS event if the driver has PPS support */
	if (info->pps) {
		struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
		info->enable(info, &req, 0);
	}
	mutex_unlock(&ptp->pincfg_mux);
}
69 
/*
 * ptp_set_pinfunc - assign a function/channel pair to a programmable pin
 * @ptp:  clock whose pin table is being changed
 * @pin:  index into info->pin_config (caller must have bounds-checked it)
 * @func: desired pin function
 * @chan: desired channel for that function
 *
 * The ioctl path takes pincfg_mux before calling here (see
 * ptp_pin_setfunc()), so the pin table is stable for the duration.
 *
 * Returns 0 on success, -EINVAL for an invalid function/channel
 * combination, or -EOPNOTSUPP if the driver's verify() callback rejects
 * the assignment.
 */
int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
		    enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_clock_info *info = ptp->info;
	struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
	unsigned int i;

	/* Check to see if any other pin previously had this function. */
	for (i = 0; i < info->n_pins; i++) {
		if (info->pin_config[i].func == func &&
		    info->pin_config[i].chan == chan) {
			pin1 = &info->pin_config[i];
			break;
		}
	}
	/* Requested assignment is already in place - nothing to do. */
	if (pin1 && i == pin)
		return 0;

	/* Check the desired function and channel. */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		if (chan >= info->n_ext_ts)
			return -EINVAL;
		break;
	case PTP_PF_PEROUT:
		if (chan >= info->n_per_out)
			return -EINVAL;
		break;
	case PTP_PF_PHYSYNC:
		if (chan != 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* Let the driver veto combinations its hardware cannot provide. */
	if (info->verify(info, pin, func, chan)) {
		pr_err("driver cannot use function %u and channel %u on pin %u\n",
		       func, chan, pin);
		return -EOPNOTSUPP;
	}

	/* Disable whichever pin was previously assigned to this function and
	 * channel.
	 */
	if (pin1) {
		ptp_disable_pinfunc(info, func, chan);
		pin1->func = PTP_PF_NONE;
		pin1->chan = 0;
	}

	/* Disable whatever function was previously assigned to the requested
	 * pin.
	 */
	ptp_disable_pinfunc(info, pin2->func, pin2->chan);
	pin2->func = func;
	pin2->chan = chan;

	return 0;
}
132 
/*
 * ptp_open - per-open-file setup for the PTP character device
 *
 * Allocates a timestamp event queue for this reader, subscribes it to
 * every channel by default, links it into the clock's queue list, and
 * exposes the channel mask through a per-queue debugfs directory.
 *
 * NOTE(review): allocation failures return -EINVAL rather than the more
 * conventional -ENOMEM; kept as-is since the error code is visible to
 * user space.
 */
int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
{
	struct ptp_clock *ptp = container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;
	char debugfsname[32];

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -EINVAL;
	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
	if (!queue->mask) {
		kfree(queue);
		return -EINVAL;
	}
	/* New readers receive events from all channels until masked. */
	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
	spin_lock_init(&queue->lock);
	/* tsevqs_lock protects the clock's list of reader queues. */
	scoped_guard(spinlock_irq, &ptp->tsevqs_lock)
		list_add_tail(&queue->qlist, &ptp->tsevqs);
	pccontext->private_clkdata = queue;

	/* Debugfs contents */
	sprintf(debugfsname, "0x%p", queue);
	queue->debugfs_instance =
		debugfs_create_dir(debugfsname, ptp->debugfs_root);
	queue->dfs_bitmap.array = (u32 *)queue->mask;
	queue->dfs_bitmap.n_elements =
		DIV_ROUND_UP(PTP_MAX_CHANNELS, BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("mask", 0444, queue->debugfs_instance,
				 &queue->dfs_bitmap);

	return 0;
}
165 
/*
 * ptp_release - tear down the per-open-file state created by ptp_open()
 *
 * Removes the queue's debugfs directory, unlinks the queue from the
 * clock's reader list under tsevqs_lock, then frees the channel mask and
 * the queue itself.
 */
int ptp_release(struct posix_clock_context *pccontext)
{
	struct timestamp_event_queue *queue = pccontext->private_clkdata;
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);

	debugfs_remove(queue->debugfs_instance);
	/* Clear the pointer first so no further ops see a dying queue. */
	pccontext->private_clkdata = NULL;
	scoped_guard(spinlock_irq, &ptp->tsevqs_lock)
		list_del(&queue->qlist);
	bitmap_free(queue->mask);
	kfree(queue);
	return 0;
}
180 
181 static long ptp_clock_getcaps(struct ptp_clock *ptp, void __user *arg)
182 {
183 	struct ptp_clock_caps caps = {
184 		.max_adj		= ptp->info->max_adj,
185 		.n_alarm		= ptp->info->n_alarm,
186 		.n_ext_ts		= ptp->info->n_ext_ts,
187 		.n_per_out		= ptp->info->n_per_out,
188 		.pps			= ptp->info->pps,
189 		.n_pins			= ptp->info->n_pins,
190 		.cross_timestamping	= ptp->info->getcrosststamp != NULL,
191 		.adjust_phase		= ptp->info->adjphase != NULL &&
192 					  ptp->info->getmaxphase != NULL,
193 	};
194 
195 	if (caps.adjust_phase)
196 		caps.max_phase_adj = ptp->info->getmaxphase(ptp->info);
197 
198 	return copy_to_user(arg, &caps, sizeof(caps)) ? -EFAULT : 0;
199 }
200 
/*
 * PTP_EXTTS_REQUEST(2) ioctl: enable or disable external timestamp
 * capture on one channel.
 *
 * v2 callers get strict validation: reserved bits must be clear and an
 * enable request must name at least one edge.  v1 callers keep the
 * historical lenient behaviour where unknown flags and reserved fields
 * are silently discarded.
 */
static long ptp_extts_request(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
	struct ptp_clock_request req = { .type = PTP_CLK_REQ_EXTTS };
	struct ptp_clock_info *ops = ptp->info;
	unsigned int supported_extts_flags;

	if (copy_from_user(&req.extts, arg, sizeof(req.extts)))
		return -EFAULT;

	if (cmd == PTP_EXTTS_REQUEST2) {
		/* Tell the drivers to check the flags carefully. */
		req.extts.flags |= PTP_STRICT_FLAGS;
		/* Make sure no reserved bit is set. */
		if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
		    req.extts.rsv[0] || req.extts.rsv[1])
			return -EINVAL;

		/* Ensure one of the rising/falling edge bits is set. */
		if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
		    (req.extts.flags & PTP_EXTTS_EDGES) == 0)
			return -EINVAL;
	} else {
		/* Legacy ioctl: drop anything v1 did not define. */
		req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
		memset(req.extts.rsv, 0, sizeof(req.extts.rsv));
	}

	if (req.extts.index >= ops->n_ext_ts)
		return -EINVAL;

	supported_extts_flags = ptp->info->supported_extts_flags;
	/* The PTP_ENABLE_FEATURE flag is always supported. */
	supported_extts_flags |= PTP_ENABLE_FEATURE;
	/* If the driver does not support strictly checking flags, the
	 * PTP_RISING_EDGE and PTP_FALLING_EDGE flags are merely hints
	 * which are not enforced.
	 */
	if (!(supported_extts_flags & PTP_STRICT_FLAGS))
		supported_extts_flags |= PTP_EXTTS_EDGES;
	/* Reject unsupported flags */
	if (req.extts.flags & ~supported_extts_flags)
		return -EOPNOTSUPP;

	/* Serialize against pin reconfiguration; interruptible by signal. */
	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
		return ops->enable(ops, &req, req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0);
}
246 
/*
 * PTP_PEROUT_REQUEST(2) ioctl: start or stop a periodic output signal.
 *
 * A zero period disables the output.  v2 callers get strict validation
 * of flags, duty cycle and phase; v1 callers keep the historical lenient
 * behaviour where unknown flags and reserved fields are discarded.
 */
static long ptp_perout_request(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
	struct ptp_clock_request req = { .type = PTP_CLK_REQ_PEROUT };
	struct ptp_perout_request *perout = &req.perout;
	struct ptp_clock_info *ops = ptp->info;

	if (copy_from_user(perout, arg, sizeof(*perout)))
		return -EFAULT;

	if (cmd == PTP_PEROUT_REQUEST2) {
		if (perout->flags & ~PTP_PEROUT_VALID_FLAGS)
			return -EINVAL;

		/*
		 * The "on" field has undefined meaning if
		 * PTP_PEROUT_DUTY_CYCLE isn't set, we must still treat it
		 * as reserved, which must be set to zero.
		 */
		if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) &&
		    !mem_is_zero(perout->rsv, sizeof(perout->rsv)))
			return -EINVAL;

		if (perout->flags & PTP_PEROUT_DUTY_CYCLE) {
			/* The duty cycle must be subunitary. */
			if (perout->on.sec > perout->period.sec ||
			    (perout->on.sec == perout->period.sec &&
			     perout->on.nsec > perout->period.nsec))
				return -ERANGE;
		}

		if (perout->flags & PTP_PEROUT_PHASE) {
			/*
			 * The phase should be specified modulo the period,
			 * therefore anything equal or larger than 1 period
			 * is invalid.
			 */
			if (perout->phase.sec > perout->period.sec ||
			    (perout->phase.sec == perout->period.sec &&
			     perout->phase.nsec >= perout->period.nsec))
				return -ERANGE;
		}
	} else {
		/* Legacy ioctl: drop anything v1 did not define. */
		perout->flags &= PTP_PEROUT_V1_VALID_FLAGS;
		memset(perout->rsv, 0, sizeof(perout->rsv));
	}

	if (perout->index >= ops->n_per_out)
		return -EINVAL;
	if (perout->flags & ~ops->supported_perout_flags)
		return -EOPNOTSUPP;

	/* Enable when a non-zero period was requested, disable otherwise. */
	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
		return ops->enable(ops, &req, perout->period.sec || perout->period.nsec);
}
301 
302 static long ptp_enable_pps(struct ptp_clock *ptp, bool enable)
303 {
304 	struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
305 	struct ptp_clock_info *ops = ptp->info;
306 
307 	if (!capable(CAP_SYS_TIME))
308 		return -EPERM;
309 
310 	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
311 		return ops->enable(ops, &req, enable);
312 }
313 
314 typedef int (*ptp_crosststamp_fn)(struct ptp_clock_info *,
315 				  struct system_device_crosststamp *);
316 
317 static long ptp_sys_offset_precise(struct ptp_clock *ptp, void __user *arg,
318 				   ptp_crosststamp_fn crosststamp_fn)
319 {
320 	struct ptp_sys_offset_precise precise_offset;
321 	struct system_device_crosststamp xtstamp;
322 	struct timespec64 ts;
323 	int err;
324 
325 	if (!crosststamp_fn)
326 		return -EOPNOTSUPP;
327 
328 	err = crosststamp_fn(ptp->info, &xtstamp);
329 	if (err)
330 		return err;
331 
332 	memset(&precise_offset, 0, sizeof(precise_offset));
333 	ts = ktime_to_timespec64(xtstamp.device);
334 	precise_offset.device.sec = ts.tv_sec;
335 	precise_offset.device.nsec = ts.tv_nsec;
336 	ts = ktime_to_timespec64(xtstamp.sys_realtime);
337 	precise_offset.sys_realtime.sec = ts.tv_sec;
338 	precise_offset.sys_realtime.nsec = ts.tv_nsec;
339 	ts = ktime_to_timespec64(xtstamp.sys_monoraw);
340 	precise_offset.sys_monoraw.sec = ts.tv_sec;
341 	precise_offset.sys_monoraw.nsec = ts.tv_nsec;
342 
343 	return copy_to_user(arg, &precise_offset, sizeof(precise_offset)) ? -EFAULT : 0;
344 }
345 
typedef int (*ptp_gettimex_fn)(struct ptp_clock_info *,
			       struct timespec64 *,
			       struct ptp_system_timestamp *);

/*
 * PTP_SYS_OFFSET_EXTENDED(2) ioctl helper: collect up to n_samples
 * triplets of (system pre, device, system post) timestamps so user space
 * can bound the device/system offset.  The system clock to sample is
 * chosen by extoff->clockid; AUX clocks are accepted only when
 * CONFIG_POSIX_AUX_CLOCKS is enabled.
 */
static long ptp_sys_offset_extended(struct ptp_clock *ptp, void __user *arg,
				    ptp_gettimex_fn gettimex_fn)
{
	/* __free(kfree) releases extoff automatically on every return. */
	struct ptp_sys_offset_extended *extoff __free(kfree) = NULL;
	struct ptp_system_timestamp sts;

	if (!gettimex_fn)
		return -EOPNOTSUPP;

	extoff = memdup_user(arg, sizeof(*extoff));
	if (IS_ERR(extoff))
		return PTR_ERR(extoff);

	if (extoff->n_samples > PTP_MAX_SAMPLES || extoff->rsv[0] || extoff->rsv[1])
		return -EINVAL;

	switch (extoff->clockid) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
		break;
	case CLOCK_AUX ... CLOCK_AUX_LAST:
		if (IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS))
			break;
		fallthrough;
	default:
		return -EINVAL;
	}

	sts.clockid = extoff->clockid;
	for (unsigned int i = 0; i < extoff->n_samples; i++) {
		struct timespec64 ts;
		int err;

		/* Driver fills sts.pre_ts/post_ts around its own reading. */
		err = gettimex_fn(ptp->info, &ts, &sts);
		if (err)
			return err;

		/* Filter out disabled or unavailable clocks */
		if (sts.pre_ts.tv_sec < 0 || sts.post_ts.tv_sec < 0)
			return -EINVAL;

		extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
		extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
		extoff->ts[i][1].sec = ts.tv_sec;
		extoff->ts[i][1].nsec = ts.tv_nsec;
		extoff->ts[i][2].sec = sts.post_ts.tv_sec;
		extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
	}

	return copy_to_user(arg, extoff, sizeof(*extoff)) ? -EFAULT : 0;
}
402 
/*
 * PTP_SYS_OFFSET(2) ioctl: interleave system-clock and device-clock
 * readings, producing 2*n_samples+1 timestamps laid out as
 *   sys[0], dev[0], sys[1], dev[1], ..., dev[n-1], sys[n]
 * so adjacent system readings bracket each device reading.
 */
static long ptp_sys_offset(struct ptp_clock *ptp, void __user *arg)
{
	/* __free(kfree) releases sysoff automatically on every return. */
	struct ptp_sys_offset *sysoff __free(kfree) = NULL;
	struct ptp_clock_time *pct;
	struct timespec64 ts;

	sysoff = memdup_user(arg, sizeof(*sysoff));
	if (IS_ERR(sysoff))
		return PTR_ERR(sysoff);

	if (sysoff->n_samples > PTP_MAX_SAMPLES)
		return -EINVAL;

	pct = &sysoff->ts[0];
	for (unsigned int i = 0; i < sysoff->n_samples; i++) {
		struct ptp_clock_info *ops = ptp->info;
		int err;

		ktime_get_real_ts64(&ts);
		pct->sec = ts.tv_sec;
		pct->nsec = ts.tv_nsec;
		pct++;
		/* Prefer the driver's gettimex64 when it provides one. */
		if (ops->gettimex64)
			err = ops->gettimex64(ops, &ts, NULL);
		else
			err = ops->gettime64(ops, &ts);
		if (err)
			return err;
		pct->sec = ts.tv_sec;
		pct->nsec = ts.tv_nsec;
		pct++;
	}
	/* Final system reading closes the last bracket. */
	ktime_get_real_ts64(&ts);
	pct->sec = ts.tv_sec;
	pct->nsec = ts.tv_nsec;

	return copy_to_user(arg, sysoff, sizeof(*sysoff)) ? -EFAULT : 0;
}
441 
/*
 * PTP_PIN_GETFUNC(2) ioctl: report the current configuration of one pin.
 * The v2 variant additionally requires the reserved fields to be zero.
 */
static long ptp_pin_getfunc(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_pin_desc pd;

	if (copy_from_user(&pd, arg, sizeof(pd)))
		return -EFAULT;

	if (cmd == PTP_PIN_GETFUNC2 && !mem_is_zero(pd.rsv, sizeof(pd.rsv)))
		return -EINVAL;

	if (pd.index >= ops->n_pins)
		return -EINVAL;

	/* array_index_nospec() blocks speculative out-of-bounds reads. */
	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
		pd = ops->pin_config[array_index_nospec(pd.index, ops->n_pins)];

	return copy_to_user(arg, &pd, sizeof(pd)) ? -EFAULT : 0;
}
461 
/*
 * PTP_PIN_SETFUNC(2) ioctl: reprogram one pin's function/channel pair.
 * Validates the index, then delegates to ptp_set_pinfunc() with
 * pincfg_mux held (interruptible by signals).
 */
static long ptp_pin_setfunc(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_pin_desc pd;
	unsigned int pin_index;

	if (copy_from_user(&pd, arg, sizeof(pd)))
		return -EFAULT;

	if (cmd == PTP_PIN_SETFUNC2 && !mem_is_zero(pd.rsv, sizeof(pd.rsv)))
		return -EINVAL;

	if (pd.index >= ops->n_pins)
		return -EINVAL;

	/* array_index_nospec() blocks speculative out-of-bounds use. */
	pin_index = array_index_nospec(pd.index, ops->n_pins);
	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
		return ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
}
481 
/* PTP_MASK_CLEAR_ALL ioctl: unsubscribe this reader from every channel. */
static long ptp_mask_clear_all(struct timestamp_event_queue *tsevq)
{
	bitmap_clear(tsevq->mask, 0, PTP_MAX_CHANNELS);
	return 0;
}
487 
/*
 * PTP_MASK_EN_SINGLE ioctl: subscribe this reader to one event channel.
 *
 * NOTE(review): an out-of-range channel returns -EFAULT rather than the
 * more conventional -EINVAL; kept as-is since it is established uapi.
 */
static long ptp_mask_en_single(struct timestamp_event_queue *tsevq, void __user *arg)
{
	unsigned int channel;

	if (copy_from_user(&channel, arg, sizeof(channel)))
		return -EFAULT;
	if (channel >= PTP_MAX_CHANNELS)
		return -EFAULT;
	set_bit(channel, tsevq->mask);
	return 0;
}
499 
/*
 * ptp_ioctl - dispatch PTP character device ioctls
 *
 * Commands that can change hardware state (EXTTS, PEROUT, PPS, pin
 * programming) require the file to be open for writing.  PTP_ENABLE_PPS*
 * passes its argument by value, so it is exempt from the compat pointer
 * fix-up applied to every other command.
 */
long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
	       unsigned long arg)
{
	struct ptp_clock *ptp = container_of(pccontext->clk, struct ptp_clock, clock);
	void __user *argptr;

	if (in_compat_syscall() && cmd != PTP_ENABLE_PPS && cmd != PTP_ENABLE_PPS2)
		arg = (unsigned long)compat_ptr(arg);
	argptr = (void __force __user *)arg;

	switch (cmd) {
	case PTP_CLOCK_GETCAPS:
	case PTP_CLOCK_GETCAPS2:
		return ptp_clock_getcaps(ptp, argptr);

	case PTP_EXTTS_REQUEST:
	case PTP_EXTTS_REQUEST2:
		if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
			return -EACCES;
		return ptp_extts_request(ptp, cmd, argptr);

	case PTP_PEROUT_REQUEST:
	case PTP_PEROUT_REQUEST2:
		if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
			return -EACCES;
		return ptp_perout_request(ptp, cmd, argptr);

	case PTP_ENABLE_PPS:
	case PTP_ENABLE_PPS2:
		if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
			return -EACCES;
		return ptp_enable_pps(ptp, !!arg);

	case PTP_SYS_OFFSET_PRECISE:
	case PTP_SYS_OFFSET_PRECISE2:
		return ptp_sys_offset_precise(ptp, argptr,
					      ptp->info->getcrosststamp);

	case PTP_SYS_OFFSET_EXTENDED:
	case PTP_SYS_OFFSET_EXTENDED2:
		return ptp_sys_offset_extended(ptp, argptr,
					       ptp->info->gettimex64);

	case PTP_SYS_OFFSET:
	case PTP_SYS_OFFSET2:
		return ptp_sys_offset(ptp, argptr);

	case PTP_PIN_GETFUNC:
	case PTP_PIN_GETFUNC2:
		return ptp_pin_getfunc(ptp, cmd, argptr);

	case PTP_PIN_SETFUNC:
	case PTP_PIN_SETFUNC2:
		if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
			return -EACCES;
		return ptp_pin_setfunc(ptp, cmd, argptr);

	case PTP_MASK_CLEAR_ALL:
		return ptp_mask_clear_all(pccontext->private_clkdata);

	case PTP_MASK_EN_SINGLE:
		return ptp_mask_en_single(pccontext->private_clkdata, argptr);

	/* Cycle-counter variants reuse the offset helpers with different
	 * driver callbacks.
	 */
	case PTP_SYS_OFFSET_PRECISE_CYCLES:
		return ptp_sys_offset_precise(ptp, argptr,
					      ptp->info->getcrosscycles);

	case PTP_SYS_OFFSET_EXTENDED_CYCLES:
		return ptp_sys_offset_extended(ptp, argptr,
					       ptp->info->getcyclesx64);
	default:
		return -ENOTTY;
	}
}
574 
575 __poll_t ptp_poll(struct posix_clock_context *pccontext, struct file *fp,
576 		  poll_table *wait)
577 {
578 	struct ptp_clock *ptp =
579 		container_of(pccontext->clk, struct ptp_clock, clock);
580 	struct timestamp_event_queue *queue;
581 
582 	queue = pccontext->private_clkdata;
583 	if (!queue)
584 		return EPOLLERR;
585 
586 	poll_wait(fp, &ptp->tsev_wq, wait);
587 
588 	return queue_cnt(queue) ? EPOLLIN : 0;
589 }
590 
/* Upper bound on bytes moved per read: a full batch of extts events. */
#define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))

/*
 * ptp_read - blocking read of external timestamp events
 *
 * Sleeps until at least one event is queued or the clock goes defunct,
 * then copies out as many whole ptp_extts_event records as fit in both
 * the user buffer and the EXTTS_BUFSIZE bounce buffer.  cnt must be a
 * multiple of the event size.
 */
ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
		 char __user *buf, size_t cnt)
{
	struct ptp_clock *ptp =	container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;
	struct ptp_extts_event *event;
	ssize_t result;

	queue = pccontext->private_clkdata;
	if (!queue)
		return -EINVAL;

	/* Only whole events may be read. */
	if (cnt % sizeof(*event) != 0)
		return -EINVAL;

	if (cnt > EXTTS_BUFSIZE)
		cnt = EXTTS_BUFSIZE;

	if (wait_event_interruptible(ptp->tsev_wq, ptp->defunct || queue_cnt(queue)))
		return -ERESTARTSYS;

	/* The device may have been unregistered while we slept. */
	if (ptp->defunct)
		return -ENODEV;

	/* Bounce buffer so copy_to_user() runs outside the spinlock. */
	event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	scoped_guard(spinlock_irq, &queue->lock) {
		size_t qcnt = min((size_t)queue_cnt(queue), cnt / sizeof(*event));

		for (size_t i = 0; i < qcnt; i++) {
			event[i] = queue->buf[queue->head];
			/* Paired with READ_ONCE() in queue_cnt() */
			WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
		}
		cnt = qcnt * sizeof(*event);
	}

	result = cnt;
	if (copy_to_user(buf, event, cnt))
		result = -EFAULT;

	kfree(event);
	return result;
}
639