xref: /linux/drivers/ptp/ptp_clock.c (revision 662fa3d6099374c4615bf64d06895e3573b935b2)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/idr.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/pps_kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include "ptp_private.h"

#define PTP_MAX_ALARMS 4
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)

/* private globals */

static dev_t ptp_devt;
static struct class *ptp_class;

static DEFINE_IDA(ptp_clocks_map);

/* time stamp event queue operations */

static inline int queue_free(struct timestamp_event_queue *q)
{
	return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
}

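/*
 * Convert an external timestamp event to a ptp_extts_event and append it to
 * the clock's event queue.  The queue is a fixed-size ring of
 * PTP_MAX_TIMESTAMPS entries; when it is full, the oldest entry is
 * overwritten rather than blocking the caller.
 */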
static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
				       struct ptp_clock_event *src)
{
	struct ptp_extts_event *dst;
	unsigned long flags;
	s64 seconds;
	u32 remainder;

	seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);

	spin_lock_irqsave(&queue->lock, flags);

	dst = &queue->buf[queue->tail];
	dst->index = src->index;
	dst->t.sec = seconds;
	dst->t.nsec = remainder;

	if (!queue_free(queue))
		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;

	queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;

	spin_unlock_irqrestore(&queue->lock, flags);
}

/* posix clock implementation */

static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = 1;
	return 0;
}

static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

	return ptp->info->settime64(ptp->info, tp);
}

static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	int err;

	if (ptp->info->gettimex64)
		err = ptp->info->gettimex64(ptp->info, tp, NULL);
	else
		err = ptp->info->gettime64(ptp->info, tp);
	return err;
}

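/*
 * clock_adjtime() handler.  ADJ_SETOFFSET steps the clock by the given
 * offset, ADJ_FREQUENCY sets the frequency (tx->freq is in scaled ppm,
 * i.e. ppm with a 16-bit binary fraction, converted to ppb for the range
 * check against max_adj), ADJ_OFFSET adjusts the phase if the driver
 * implements adjphase, and modes == 0 reads back the last dialed frequency.
 */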
static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct ptp_clock_info *ops;
	int err = -EOPNOTSUPP;

	ops = ptp->info;

	if (tx->modes & ADJ_SETOFFSET) {
		struct timespec64 ts;
		ktime_t kt;
		s64 delta;

		ts.tv_sec  = tx->time.tv_sec;
		ts.tv_nsec = tx->time.tv_usec;

		if (!(tx->modes & ADJ_NANO))
			ts.tv_nsec *= 1000;

		if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
			return -EINVAL;

		kt = timespec64_to_ktime(ts);
		delta = ktime_to_ns(kt);
		err = ops->adjtime(ops, delta);
	} else if (tx->modes & ADJ_FREQUENCY) {
		long ppb = scaled_ppm_to_ppb(tx->freq);
		if (ppb > ops->max_adj || ppb < -ops->max_adj)
			return -ERANGE;
		if (ops->adjfine)
			err = ops->adjfine(ops, tx->freq);
		else
			err = ops->adjfreq(ops, ppb);
		ptp->dialed_frequency = tx->freq;
	} else if (tx->modes & ADJ_OFFSET) {
		if (ops->adjphase) {
			s32 offset = tx->offset;

			if (!(tx->modes & ADJ_NANO))
				offset *= NSEC_PER_USEC;

			err = ops->adjphase(ops, offset);
		}
	} else if (tx->modes == 0) {
		tx->freq = ptp->dialed_frequency;
		err = 0;
	}

	return err;
}

static struct posix_clock_operations ptp_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_adjtime	= ptp_clock_adjtime,
	.clock_gettime	= ptp_clock_gettime,
	.clock_getres	= ptp_clock_getres,
	.clock_settime	= ptp_clock_settime,
	.ioctl		= ptp_ioctl,
	.open		= ptp_open,
	.poll		= ptp_poll,
	.read		= ptp_read,
};

static void ptp_clock_release(struct device *dev)
{
	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);

	ptp_cleanup_pin_groups(ptp);
	mutex_destroy(&ptp->tsevq_mux);
	mutex_destroy(&ptp->pincfg_mux);
	ida_simple_remove(&ptp_clocks_map, ptp->index);
	kfree(ptp);
}

static void ptp_aux_kworker(struct kthread_work *work)
{
	struct ptp_clock *ptp = container_of(work, struct ptp_clock,
					     aux_work.work);
	struct ptp_clock_info *info = ptp->info;
	long delay;

	delay = info->do_aux_work(info);

	if (delay >= 0)
		kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}

/* public interface */

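/*
 * Registration sketch for driver authors (illustrative only; the "my_*"
 * names and the platform device below are hypothetical, not part of this
 * file):
 *
 *	static struct ptp_clock_info my_ptp_info = {
 *		.owner		= THIS_MODULE,
 *		.name		= "my_clock",
 *		.max_adj	= 1000000,
 *		.adjfine	= my_adjfine,
 *		.adjtime	= my_adjtime,
 *		.gettime64	= my_gettime64,
 *		.settime64	= my_settime64,
 *		.enable		= my_enable,
 *	};
 *
 *	ptp = ptp_clock_register(&my_ptp_info, &pdev->dev);
 *	if (IS_ERR(ptp))
 *		return PTR_ERR(ptp);
 *
 * On success the clock is exposed to user space as /dev/ptpN, where N is
 * ptp_clock_index(ptp).
 */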
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
				     struct device *parent)
{
	struct ptp_clock *ptp;
	int err = 0, index, major = MAJOR(ptp_devt);

	if (info->n_alarm > PTP_MAX_ALARMS)
		return ERR_PTR(-EINVAL);

	/* Initialize a clock structure. */
	err = -ENOMEM;
	ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
	if (ptp == NULL)
		goto no_memory;

	index = ida_simple_get(&ptp_clocks_map, 0, MINORMASK + 1, GFP_KERNEL);
	if (index < 0) {
		err = index;
		goto no_slot;
	}

	ptp->clock.ops = ptp_clock_ops;
	ptp->info = info;
	ptp->devid = MKDEV(major, index);
	ptp->index = index;
	spin_lock_init(&ptp->tsevq.lock);
	mutex_init(&ptp->tsevq_mux);
	mutex_init(&ptp->pincfg_mux);
	init_waitqueue_head(&ptp->tsev_wq);

	if (ptp->info->do_aux_work) {
		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
		if (IS_ERR(ptp->kworker)) {
			err = PTR_ERR(ptp->kworker);
			pr_err("failed to create ptp aux_worker %d\n", err);
			goto kworker_err;
		}
	}

	err = ptp_populate_pin_groups(ptp);
	if (err)
		goto no_pin_groups;

	/* Register a new PPS source. */
	if (info->pps) {
		struct pps_source_info pps;
		memset(&pps, 0, sizeof(pps));
		snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
		pps.mode = PTP_PPS_MODE;
		pps.owner = info->owner;
		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
		if (IS_ERR(ptp->pps_source)) {
			err = PTR_ERR(ptp->pps_source);
			pr_err("failed to register pps source\n");
			goto no_pps;
		}
		ptp->pps_source->lookup_cookie = ptp;
	}

	/* Initialize a new device of our class in our clock structure. */
	device_initialize(&ptp->dev);
	ptp->dev.devt = ptp->devid;
	ptp->dev.class = ptp_class;
	ptp->dev.parent = parent;
	ptp->dev.groups = ptp->pin_attr_groups;
	ptp->dev.release = ptp_clock_release;
	dev_set_drvdata(&ptp->dev, ptp);
	dev_set_name(&ptp->dev, "ptp%d", ptp->index);

	/* Create a posix clock and link it to the device. */
	err = posix_clock_register(&ptp->clock, &ptp->dev);
	if (err) {
		pr_err("failed to create posix clock\n");
		goto no_clock;
	}

	return ptp;

no_clock:
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);
no_pps:
	ptp_cleanup_pin_groups(ptp);
no_pin_groups:
	if (ptp->kworker)
		kthread_destroy_worker(ptp->kworker);
kworker_err:
	mutex_destroy(&ptp->tsevq_mux);
	mutex_destroy(&ptp->pincfg_mux);
	ida_simple_remove(&ptp_clocks_map, index);
no_slot:
	kfree(ptp);
no_memory:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);

int ptp_clock_unregister(struct ptp_clock *ptp)
{
	ptp->defunct = 1;
	wake_up_interruptible(&ptp->tsev_wq);

	if (ptp->kworker) {
		kthread_cancel_delayed_work_sync(&ptp->aux_work);
		kthread_destroy_worker(ptp->kworker);
	}

	/* Release the clock's resources. */
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);

	posix_clock_unregister(&ptp->clock);

	return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);

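/*
 * Event delivery entry point for drivers.  External timestamps are queued
 * for readers of the character device and any waiters are woken;
 * PTP_CLOCK_PPS events are timestamped here and fed to the PPS subsystem,
 * while PTP_CLOCK_PPSUSR events carry a driver-supplied timestamp.  Alarm
 * events are currently ignored.
 */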
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
	struct pps_event_time evt;

	switch (event->type) {

	case PTP_CLOCK_ALARM:
		break;

	case PTP_CLOCK_EXTTS:
		enqueue_external_timestamp(&ptp->tsevq, event);
		wake_up_interruptible(&ptp->tsev_wq);
		break;

	case PTP_CLOCK_PPS:
		pps_get_ts(&evt);
		pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
		break;

	case PTP_CLOCK_PPSUSR:
		pps_event(ptp->pps_source, &event->pps_times,
			  PTP_PPS_EVENT, NULL);
		break;
	}
}
EXPORT_SYMBOL(ptp_clock_event);

int ptp_clock_index(struct ptp_clock *ptp)
{
	return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);

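/*
 * Look up the pin currently programmed for the given auxiliary function and
 * channel, returning its index or -1 if none matches.  Callers are expected
 * to already hold pincfg_mux (e.g. from within an enable() or verify()
 * callback, where the core has taken it); ptp_find_pin_unlocked() below
 * acquires the mutex on behalf of callers that do not hold it.
 */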
int ptp_find_pin(struct ptp_clock *ptp,
		 enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_pin_desc *pin = NULL;
	int i;

	for (i = 0; i < ptp->info->n_pins; i++) {
		if (ptp->info->pin_config[i].func == func &&
		    ptp->info->pin_config[i].chan == chan) {
			pin = &ptp->info->pin_config[i];
			break;
		}
	}

	return pin ? i : -1;
}
EXPORT_SYMBOL(ptp_find_pin);

int ptp_find_pin_unlocked(struct ptp_clock *ptp,
			  enum ptp_pin_function func, unsigned int chan)
{
	int result;

	mutex_lock(&ptp->pincfg_mux);

	result = ptp_find_pin(ptp, func, chan);

	mutex_unlock(&ptp->pincfg_mux);

	return result;
}
EXPORT_SYMBOL(ptp_find_pin_unlocked);

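/*
 * (Re)arm the clock's auxiliary worker to run after 'delay' jiffies.  Only
 * meaningful for drivers that provide a do_aux_work callback; see
 * ptp_aux_kworker() above, which re-queues the work whenever do_aux_work()
 * returns a non-negative delay.
 */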
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);

void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
	kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);

/* module operations */

static void __exit ptp_exit(void)
{
	class_destroy(ptp_class);
	unregister_chrdev_region(ptp_devt, MINORMASK + 1);
	ida_destroy(&ptp_clocks_map);
}

static int __init ptp_init(void)
{
	int err;

	ptp_class = class_create(THIS_MODULE, "ptp");
	if (IS_ERR(ptp_class)) {
		pr_err("ptp: failed to allocate class\n");
		return PTR_ERR(ptp_class);
	}

	err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
	if (err < 0) {
		pr_err("ptp: failed to allocate device region\n");
		goto no_region;
	}

	ptp_class->dev_groups = ptp_groups;
	pr_info("PTP clock support registered\n");
	return 0;

no_region:
	class_destroy(ptp_class);
	return err;
}

subsys_initcall(ptp_init);
module_exit(ptp_exit);

MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clocks support");
MODULE_LICENSE("GPL");