xref: /linux/drivers/ptp/ptp_clock.c (revision ba0ad6ed89fd5dada3b7b65ef2b08e95d449d4ab)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/idr.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/pps_kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include "ptp_private.h"

#define PTP_MAX_ALARMS 4
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)

struct class *ptp_class;

/* private globals */

static dev_t ptp_devt;

static DEFINE_IDA(ptp_clocks_map);

/* time stamp event queue operations */

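/*
 * Return the number of free slots in the timestamp ring.  One slot is
 * always kept unused so that a full queue can be told apart from an
 * empty one (head == tail).
 */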
static inline int queue_free(struct timestamp_event_queue *q)
{
	return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
}

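/*
 * Convert the event's nanosecond timestamp into a second/nanosecond
 * pair and append it to the ring.  If no slot is free, the oldest
 * entry is overwritten by advancing the head pointer.
 */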
static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
				       struct ptp_clock_event *src)
{
	struct ptp_extts_event *dst;
	unsigned long flags;
	s64 seconds;
	u32 remainder;

	seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);

	spin_lock_irqsave(&queue->lock, flags);

	dst = &queue->buf[queue->tail];
	dst->index = src->index;
	dst->t.sec = seconds;
	dst->t.nsec = remainder;

	if (!queue_free(queue))
		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;

	queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;

	spin_unlock_irqrestore(&queue->lock, flags);
}

/* posix clock implementation */

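/* The clock resolution reported to user space is one nanosecond. */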
static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = 1;
	return 0;
}

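/*
 * Refuse to step the clock while it is treated as free running
 * (typically because PTP virtual clocks are in use on top of it).
 */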
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

	if (ptp_clock_freerun(ptp)) {
		pr_err("ptp: physical clock is free running\n");
		return -EBUSY;
	}

	return ptp->info->settime64(ptp->info, tp);
}

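/*
 * Read the clock, preferring the extended gettimex64() callback when
 * the driver provides it.
 */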
static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	int err;

	if (ptp->info->gettimex64)
		err = ptp->info->gettimex64(ptp->info, tp, NULL);
	else
		err = ptp->info->gettime64(ptp->info, tp);
	return err;
}

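/*
 * Dispatch clock_adjtime() requests to the driver:
 *
 *   ADJ_SETOFFSET - step the clock by the given offset (adjtime)
 *   ADJ_FREQUENCY - set the frequency adjustment, bounded by max_adj (adjfine)
 *   ADJ_OFFSET    - apply a phase offset, if the driver supports adjphase
 *   modes == 0    - report the currently dialed frequency
 */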
static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct ptp_clock_info *ops;
	int err = -EOPNOTSUPP;

	if (ptp_clock_freerun(ptp)) {
		pr_err("ptp: physical clock is free running\n");
		return -EBUSY;
	}

	ops = ptp->info;

	if (tx->modes & ADJ_SETOFFSET) {
		struct timespec64 ts;
		ktime_t kt;
		s64 delta;

		ts.tv_sec  = tx->time.tv_sec;
		ts.tv_nsec = tx->time.tv_usec;

		if (!(tx->modes & ADJ_NANO))
			ts.tv_nsec *= 1000;

		if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
			return -EINVAL;

		kt = timespec64_to_ktime(ts);
		delta = ktime_to_ns(kt);
		err = ops->adjtime(ops, delta);
	} else if (tx->modes & ADJ_FREQUENCY) {
		long ppb = scaled_ppm_to_ppb(tx->freq);
		if (ppb > ops->max_adj || ppb < -ops->max_adj)
			return -ERANGE;
		err = ops->adjfine(ops, tx->freq);
		ptp->dialed_frequency = tx->freq;
	} else if (tx->modes & ADJ_OFFSET) {
		if (ops->adjphase) {
			s32 offset = tx->offset;

			if (!(tx->modes & ADJ_NANO))
				offset *= NSEC_PER_USEC;

			err = ops->adjphase(ops, offset);
		}
	} else if (tx->modes == 0) {
		tx->freq = ptp->dialed_frequency;
		err = 0;
	}

	return err;
}

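/* Posix clock operations backing the PTP character device. */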
static struct posix_clock_operations ptp_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_adjtime	= ptp_clock_adjtime,
	.clock_gettime	= ptp_clock_gettime,
	.clock_getres	= ptp_clock_getres,
	.clock_settime	= ptp_clock_settime,
	.ioctl		= ptp_ioctl,
	.open		= ptp_open,
	.poll		= ptp_poll,
	.read		= ptp_read,
};

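/*
 * Device release callback: frees everything that was allocated in
 * ptp_clock_register() once the last reference to the device is gone.
 */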
static void ptp_clock_release(struct device *dev)
{
	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);

	ptp_cleanup_pin_groups(ptp);
	kfree(ptp->vclock_index);
	mutex_destroy(&ptp->tsevq_mux);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	ida_free(&ptp_clocks_map, ptp->index);
	kfree(ptp);
}

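/*
 * Default getcycles64() implementation, installed at registration time
 * for drivers that supply only getcyclesx64() or no cycle counter at
 * all: fall back to the extended cycle read or to reading the time.
 */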
static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
{
	if (info->getcyclesx64)
		return info->getcyclesx64(info, ts, NULL);
	else
		return info->gettime64(info, ts);
}

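/*
 * Run the driver's auxiliary work and, if the callback returns a
 * non-negative delay, re-queue itself to run again after that many
 * jiffies.
 */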
static void ptp_aux_kworker(struct kthread_work *work)
{
	struct ptp_clock *ptp = container_of(work, struct ptp_clock,
					     aux_work.work);
	struct ptp_clock_info *info = ptp->info;
	long delay;

	delay = info->do_aux_work(info);

	if (delay >= 0)
		kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}

/* public interface */

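/*
 * Create and register a new PTP clock for the given driver info:
 * allocate the clock structure and a device index, fill in optional
 * cycle-counter callbacks, start an auxiliary kworker if the driver
 * needs one, optionally register a PPS source, and finally expose the
 * clock as a posix clock bound to its character device.  Returns the
 * new clock or an ERR_PTR() value on failure.
 */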
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
				     struct device *parent)
{
	struct ptp_clock *ptp;
	int err = 0, index, major = MAJOR(ptp_devt);
	size_t size;

	if (info->n_alarm > PTP_MAX_ALARMS)
		return ERR_PTR(-EINVAL);

	/* Initialize a clock structure. */
	err = -ENOMEM;
	ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
	if (ptp == NULL)
		goto no_memory;

	index = ida_alloc_max(&ptp_clocks_map, MINORMASK, GFP_KERNEL);
	if (index < 0) {
		err = index;
		goto no_slot;
	}

	ptp->clock.ops = ptp_clock_ops;
	ptp->info = info;
	ptp->devid = MKDEV(major, index);
	ptp->index = index;
	spin_lock_init(&ptp->tsevq.lock);
	mutex_init(&ptp->tsevq_mux);
	mutex_init(&ptp->pincfg_mux);
	mutex_init(&ptp->n_vclocks_mux);
	init_waitqueue_head(&ptp->tsev_wq);

	if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
		ptp->has_cycles = true;
		if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
			ptp->info->getcycles64 = ptp_getcycles64;
	} else {
		/* Free running cycle counter not supported, use time. */
		ptp->info->getcycles64 = ptp_getcycles64;

		if (ptp->info->gettimex64)
			ptp->info->getcyclesx64 = ptp->info->gettimex64;

		if (ptp->info->getcrosststamp)
			ptp->info->getcrosscycles = ptp->info->getcrosststamp;
	}

	if (ptp->info->do_aux_work) {
		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
		if (IS_ERR(ptp->kworker)) {
			err = PTR_ERR(ptp->kworker);
			pr_err("failed to create ptp aux_worker %d\n", err);
			goto kworker_err;
		}
	}

	/* PTP virtual clock is being registered under physical clock */
	if (parent && parent->class && parent->class->name &&
	    strcmp(parent->class->name, "ptp") == 0)
		ptp->is_virtual_clock = true;

	if (!ptp->is_virtual_clock) {
		ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

		size = sizeof(int) * ptp->max_vclocks;
		ptp->vclock_index = kzalloc(size, GFP_KERNEL);
		if (!ptp->vclock_index) {
			err = -ENOMEM;
			goto no_mem_for_vclocks;
		}
	}

	err = ptp_populate_pin_groups(ptp);
	if (err)
		goto no_pin_groups;

	/* Register a new PPS source. */
	if (info->pps) {
		struct pps_source_info pps;
		memset(&pps, 0, sizeof(pps));
		snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
		pps.mode = PTP_PPS_MODE;
		pps.owner = info->owner;
		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
		if (IS_ERR(ptp->pps_source)) {
			err = PTR_ERR(ptp->pps_source);
			pr_err("failed to register pps source\n");
			goto no_pps;
		}
		ptp->pps_source->lookup_cookie = ptp;
	}

	/* Initialize a new device of our class in our clock structure. */
	device_initialize(&ptp->dev);
	ptp->dev.devt = ptp->devid;
	ptp->dev.class = ptp_class;
	ptp->dev.parent = parent;
	ptp->dev.groups = ptp->pin_attr_groups;
	ptp->dev.release = ptp_clock_release;
	dev_set_drvdata(&ptp->dev, ptp);
	dev_set_name(&ptp->dev, "ptp%d", ptp->index);

	/* Create a posix clock and link it to the device. */
	err = posix_clock_register(&ptp->clock, &ptp->dev);
	if (err) {
		if (ptp->pps_source)
			pps_unregister_source(ptp->pps_source);

		if (ptp->kworker)
			kthread_destroy_worker(ptp->kworker);

		put_device(&ptp->dev);

		pr_err("failed to create posix clock\n");
		return ERR_PTR(err);
	}

	return ptp;

no_pps:
	ptp_cleanup_pin_groups(ptp);
no_pin_groups:
	kfree(ptp->vclock_index);
no_mem_for_vclocks:
	if (ptp->kworker)
		kthread_destroy_worker(ptp->kworker);
kworker_err:
	mutex_destroy(&ptp->tsevq_mux);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	ida_free(&ptp_clocks_map, index);
no_slot:
	kfree(ptp);
no_memory:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);

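/* device_for_each_child() callback: unregister one virtual clock. */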
static int unregister_vclock(struct device *dev, void *data)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);

	ptp_vclock_unregister(info_to_vclock(ptp->info));
	return 0;
}

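/*
 * Tear down a clock registered with ptp_clock_register(): remove any
 * virtual clocks stacked on top of it, mark it defunct so blocked
 * readers return, stop the auxiliary worker, release the PPS source
 * and unregister the posix clock.  The remaining memory is freed from
 * ptp_clock_release() once the device reference count drops to zero.
 */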
int ptp_clock_unregister(struct ptp_clock *ptp)
{
	if (ptp_vclock_in_use(ptp)) {
		device_for_each_child(&ptp->dev, NULL, unregister_vclock);
	}

	ptp->defunct = 1;
	wake_up_interruptible(&ptp->tsev_wq);

	if (ptp->kworker) {
		kthread_cancel_delayed_work_sync(&ptp->aux_work);
		kthread_destroy_worker(ptp->kworker);
	}

	/* Release the clock's resources. */
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);

	posix_clock_unregister(&ptp->clock);

	return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);

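/*
 * Deliver a driver event: external timestamps are queued for readers
 * of the character device, PPS events are forwarded to the PPS
 * subsystem (using either the current system time or the timestamps
 * supplied by the driver), and alarm events are currently ignored.
 */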
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
	struct pps_event_time evt;

	switch (event->type) {

	case PTP_CLOCK_ALARM:
		break;

	case PTP_CLOCK_EXTTS:
		enqueue_external_timestamp(&ptp->tsevq, event);
		wake_up_interruptible(&ptp->tsev_wq);
		break;

	case PTP_CLOCK_PPS:
		pps_get_ts(&evt);
		pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
		break;

	case PTP_CLOCK_PPSUSR:
		pps_event(ptp->pps_source, &event->pps_times,
			  PTP_PPS_EVENT, NULL);
		break;
	}
}
EXPORT_SYMBOL(ptp_clock_event);

int ptp_clock_index(struct ptp_clock *ptp)
{
	return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);

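/*
 * Look up the pin currently programmed for the given function and
 * channel; returns the pin index or -1 if none matches.  The caller is
 * expected to already hold pincfg_mux (ptp_find_pin_unlocked() below
 * takes the mutex itself).
 */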
int ptp_find_pin(struct ptp_clock *ptp,
		 enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_pin_desc *pin = NULL;
	int i;

	for (i = 0; i < ptp->info->n_pins; i++) {
		if (ptp->info->pin_config[i].func == func &&
		    ptp->info->pin_config[i].chan == chan) {
			pin = &ptp->info->pin_config[i];
			break;
		}
	}

	return pin ? i : -1;
}
EXPORT_SYMBOL(ptp_find_pin);

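/*
 * Same lookup as ptp_find_pin(), for callers that do not already hold
 * pincfg_mux: the pin configuration lock is taken around the search.
 */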
int ptp_find_pin_unlocked(struct ptp_clock *ptp,
			  enum ptp_pin_function func, unsigned int chan)
{
	int result;

	mutex_lock(&ptp->pincfg_mux);

	result = ptp_find_pin(ptp, func, chan);

	mutex_unlock(&ptp->pincfg_mux);

	return result;
}
EXPORT_SYMBOL(ptp_find_pin_unlocked);

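/*
 * (Re)schedule the driver's auxiliary work to run after the given
 * delay in jiffies.  Only meaningful for drivers that supplied a
 * do_aux_work() callback, since the kworker is created only then.
 */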
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);

void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
	kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);

/* module operations */

static void __exit ptp_exit(void)
{
	class_destroy(ptp_class);
	unregister_chrdev_region(ptp_devt, MINORMASK + 1);
	ida_destroy(&ptp_clocks_map);
}

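/*
 * Create the "ptp" class and reserve a range of character device
 * numbers.  Runs at subsys_initcall time so the class is available
 * before clock drivers start registering.
 */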
static int __init ptp_init(void)
{
	int err;

	ptp_class = class_create("ptp");
	if (IS_ERR(ptp_class)) {
		pr_err("ptp: failed to allocate class\n");
		return PTR_ERR(ptp_class);
	}

	err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
	if (err < 0) {
		pr_err("ptp: failed to allocate device region\n");
		goto no_region;
	}

	ptp_class->dev_groups = ptp_groups;
	pr_info("PTP clock support registered\n");
	return 0;

no_region:
	class_destroy(ptp_class);
	return err;
}

subsys_initcall(ptp_init);
module_exit(ptp_exit);

MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clocks support");
MODULE_LICENSE("GPL");