// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support
 *
 * Core registration/unregistration and event delivery for PTP hardware
 * clocks, exposed to userspace as dynamic POSIX clocks (/dev/ptpN).
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/idr.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/pps_kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <uapi/linux/sched/types.h>

#include "ptp_private.h"

#define PTP_MAX_ALARMS 4
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)

/* The sysfs class under which every PTP clock device is registered. */
const struct class ptp_class = {
	.name = "ptp",
	.dev_groups = ptp_groups
};

/* private globals */

/* Base dev_t for the ptp character device region (see ptp_init()). */
static dev_t ptp_devt;

/* Allocates the per-clock minor/index numbers ("ptp%d"). */
static DEFINE_IDA(ptp_clocks_map);

/* time stamp event queue operations */

/*
 * Number of free slots in the ring; one slot is deliberately kept
 * unused so that head == tail unambiguously means "empty".
 */
static inline int queue_free(struct timestamp_event_queue *q)
{
	return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
}

/*
 * Convert an EXTTS or EXTOFF clock event into a ptp_extts_event and
 * append it to @queue.  When the ring is full the oldest entry is
 * overwritten (head is advanced before tail).  Safe to call from the
 * event-delivery path; takes the queue lock with IRQs disabled.
 */
static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
				       struct ptp_clock_event *src)
{
	struct ptp_extts_event *dst;
	struct timespec64 offset_ts;
	unsigned long flags;
	s64 seconds;
	u32 remainder;

	if (src->type == PTP_CLOCK_EXTTS) {
		/* EXTTS carries an absolute timestamp in nanoseconds. */
		seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
	} else if (src->type == PTP_CLOCK_EXTOFF) {
		/* EXTOFF carries a (possibly negative) offset in ns. */
		offset_ts = ns_to_timespec64(src->offset);
		seconds = offset_ts.tv_sec;
		remainder = offset_ts.tv_nsec;
	} else {
		WARN(1, "%s: unknown type %d\n", __func__, src->type);
		return;
	}

	spin_lock_irqsave(&queue->lock, flags);

	dst = &queue->buf[queue->tail];
	dst->index = src->index;
	dst->flags = PTP_EXTTS_EVENT_VALID;
	dst->t.sec = seconds;
	dst->t.nsec = remainder;
	if (src->type == PTP_CLOCK_EXTOFF)
		dst->flags |= PTP_EXT_OFFSET;

	/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
	if (!queue_free(queue))
		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);

	WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);

	spin_unlock_irqrestore(&queue->lock, flags);
}

/* posix clock implementation */

/* Advertise 1 ns resolution for every PTP clock. */
static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = 1;
	return 0;
}

/*
 * clock_settime() backend.  Refused while the clock is in free-running
 * mode (e.g. serving as the base of virtual clocks).
 */
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

	if (ptp_clock_freerun(ptp)) {
		pr_err("ptp: physical clock is free running\n");
		return -EBUSY;
	}

	return ptp->info->settime64(ptp->info, tp);
}

/*
 * clock_gettime() backend.  Prefers the driver's extended gettimex64()
 * (which can also report system timestamps) over plain gettime64().
 */
static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	int err;

	if (ptp->info->gettimex64)
		err = ptp->info->gettimex64(ptp->info, tp, NULL);
	else
		err = ptp->info->gettime64(ptp->info, tp);
	return err;
}

/*
 * clock_adjtime() backend.  Dispatches on tx->modes:
 *   ADJ_SETOFFSET  - step the clock by a signed delta,
 *   ADJ_FREQUENCY  - slew, after range-checking against max_adj,
 *   ADJ_OFFSET     - phase adjustment via adjphase() when supported,
 *   modes == 0     - query: report the last dialed frequency.
 * Returns -EBUSY while the clock is free running, -EOPNOTSUPP for
 * unsupported mode combinations.
 */
static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct ptp_clock_info *ops;
	int err = -EOPNOTSUPP;

	if (ptp_clock_freerun(ptp)) {
		pr_err("ptp: physical clock is free running\n");
		return -EBUSY;
	}

	ops = ptp->info;

	if (tx->modes & ADJ_SETOFFSET) {
		struct timespec64 ts;
		ktime_t kt;
		s64 delta;

		ts.tv_sec = tx->time.tv_sec;
		ts.tv_nsec = tx->time.tv_usec;

		/* Without ADJ_NANO, tv_usec really is microseconds. */
		if (!(tx->modes & ADJ_NANO))
			ts.tv_nsec *= 1000;

		if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
			return -EINVAL;

		kt = timespec64_to_ktime(ts);
		delta = ktime_to_ns(kt);
		err = ops->adjtime(ops, delta);
	} else if (tx->modes & ADJ_FREQUENCY) {
		/* Range check in ppb, but hand the driver scaled ppm. */
		long ppb = scaled_ppm_to_ppb(tx->freq);
		if (ppb > ops->max_adj || ppb < -ops->max_adj)
			return -ERANGE;
		err = ops->adjfine(ops, tx->freq);
		/* Remember the dialed frequency for modes == 0 queries. */
		ptp->dialed_frequency = tx->freq;
	} else if (tx->modes & ADJ_OFFSET) {
		if (ops->adjphase) {
			s32 max_phase_adj = ops->getmaxphase(ops);
			s32 offset = tx->offset;

			if (!(tx->modes & ADJ_NANO))
				offset *= NSEC_PER_USEC;

			if (offset > max_phase_adj || offset < -max_phase_adj)
				return -ERANGE;

			err = ops->adjphase(ops, offset);
		}
	} else if (tx->modes == 0) {
		tx->freq = ptp->dialed_frequency;
		err = 0;
	}

	return err;
}

/* posix_clock callbacks; the chardev ops live in ptp_chardev.c. */
static struct posix_clock_operations ptp_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_adjtime	= ptp_clock_adjtime,
	.clock_gettime	= ptp_clock_gettime,
	.clock_getres	= ptp_clock_getres,
	.clock_settime	= ptp_clock_settime,
	.ioctl		= ptp_ioctl,
	.open		= ptp_open,
	.release	= ptp_release,
	.poll		= ptp_poll,
	.read		= ptp_read,
};

/*
 * Device release callback: runs when the last reference to the
 * embedded struct device is dropped.  Frees everything owned by the
 * clock, including the default (first) timestamp event queue that
 * ptp_clock_register() allocated.
 */
static void ptp_clock_release(struct device *dev)
{
	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
	struct timestamp_event_queue *tsevq;
	unsigned long flags;

	ptp_cleanup_pin_groups(ptp);
	kfree(ptp->vclock_index);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	/* Delete first entry */
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
				 qlist);
	list_del(&tsevq->qlist);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	bitmap_free(tsevq->mask);
	kfree(tsevq);
	debugfs_remove(ptp->debugfs_root);
	ida_free(&ptp_clocks_map, ptp->index);
	kfree(ptp);
}

/*
 * Default getcycles64 implementation, installed by ptp_clock_register()
 * when the driver does not provide one: fall back to the extended or
 * plain time getter.
 */
static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
{
	if (info->getcyclesx64)
		return info->getcyclesx64(info, ts, NULL);
	else
		return info->gettime64(info, ts);
}

/*
 * kthread worker function: run the driver's auxiliary work and, if the
 * driver returns a non-negative delay, requeue itself after that many
 * jiffies.
 */
static void ptp_aux_kworker(struct kthread_work *work)
{
	struct ptp_clock *ptp = container_of(work, struct ptp_clock,
					     aux_work.work);
	struct ptp_clock_info *info = ptp->info;
	long delay;

	delay = info->do_aux_work(info);

	if (delay >= 0)
		kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}

/* public interface */

/*
 * Register a new PTP hardware clock described by @info under @parent.
 * Allocates the clock index, the default timestamp event queue, the
 * optional aux kworker and PPS source, creates the class device and
 * the POSIX clock, and finally a debugfs directory.  On failure the
 * goto chain below unwinds each acquired resource in reverse order.
 * Returns the new clock or an ERR_PTR().
 */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
				     struct device *parent)
{
	struct ptp_clock *ptp;
	struct timestamp_event_queue *queue = NULL;
	int err = 0, index, major = MAJOR(ptp_devt);
	char debugfsname[16];
	size_t size;

	if (info->n_alarm > PTP_MAX_ALARMS)
		return ERR_PTR(-EINVAL);

	/* Initialize a clock structure. */
	err = -ENOMEM;
	ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
	if (ptp == NULL)
		goto no_memory;

	index = ida_alloc_max(&ptp_clocks_map, MINORMASK, GFP_KERNEL);
	if (index < 0) {
		err = index;
		goto no_slot;
	}

	ptp->clock.ops = ptp_clock_ops;
	ptp->info = info;
	ptp->devid = MKDEV(major, index);
	ptp->index = index;
	INIT_LIST_HEAD(&ptp->tsevqs);
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		goto no_memory_queue;
	list_add_tail(&queue->qlist, &ptp->tsevqs);
	spin_lock_init(&ptp->tsevqs_lock);
	/* The default queue listens on every event channel. */
	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
	if (!queue->mask)
		goto no_memory_bitmap;
	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
	spin_lock_init(&queue->lock);
	mutex_init(&ptp->pincfg_mux);
	mutex_init(&ptp->n_vclocks_mux);
	init_waitqueue_head(&ptp->tsev_wq);

	if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
		ptp->has_cycles = true;
		if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
			ptp->info->getcycles64 = ptp_getcycles64;
	} else {
		/* Free running cycle counter not supported, use time. */
		ptp->info->getcycles64 = ptp_getcycles64;

		if (ptp->info->gettimex64)
			ptp->info->getcyclesx64 = ptp->info->gettimex64;

		if (ptp->info->getcrosststamp)
			ptp->info->getcrosscycles = ptp->info->getcrosststamp;
	}

	if (ptp->info->do_aux_work) {
		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
		if (IS_ERR(ptp->kworker)) {
			err = PTR_ERR(ptp->kworker);
			pr_err("failed to create ptp aux_worker %d\n", err);
			goto kworker_err;
		}
	}

	/* PTP virtual clock is being registered under physical clock */
	if (parent && parent->class && parent->class->name &&
	    strcmp(parent->class->name, "ptp") == 0)
		ptp->is_virtual_clock = true;

	if (!ptp->is_virtual_clock) {
		ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

		size = sizeof(int) * ptp->max_vclocks;
		ptp->vclock_index = kzalloc(size, GFP_KERNEL);
		if (!ptp->vclock_index) {
			err = -ENOMEM;
			goto no_mem_for_vclocks;
		}
	}

	err = ptp_populate_pin_groups(ptp);
	if (err)
		goto no_pin_groups;

	/* Register a new PPS source. */
	if (info->pps) {
		struct pps_source_info pps;
		memset(&pps, 0, sizeof(pps));
		snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
		pps.mode = PTP_PPS_MODE;
		pps.owner = info->owner;
		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
		if (IS_ERR(ptp->pps_source)) {
			err = PTR_ERR(ptp->pps_source);
			pr_err("failed to register pps source\n");
			goto no_pps;
		}
		ptp->pps_source->lookup_cookie = ptp;
	}

	/* Initialize a new device of our class in our clock structure. */
	device_initialize(&ptp->dev);
	ptp->dev.devt = ptp->devid;
	ptp->dev.class = &ptp_class;
	ptp->dev.parent = parent;
	ptp->dev.groups = ptp->pin_attr_groups;
	ptp->dev.release = ptp_clock_release;
	dev_set_drvdata(&ptp->dev, ptp);
	dev_set_name(&ptp->dev, "ptp%d", ptp->index);

	/* Create a posix clock and link it to the device. */
	err = posix_clock_register(&ptp->clock, &ptp->dev);
	if (err) {
		if (ptp->pps_source)
			pps_unregister_source(ptp->pps_source);

		if (ptp->kworker)
			kthread_destroy_worker(ptp->kworker);

		/*
		 * After device_initialize() the structure is freed by
		 * ptp_clock_release() via put_device(), not by the
		 * goto chain below.
		 */
		put_device(&ptp->dev);

		pr_err("failed to create posix clock\n");
		return ERR_PTR(err);
	}

	/* Debugfs initialization */
	snprintf(debugfsname, sizeof(debugfsname), "ptp%d", ptp->index);
	ptp->debugfs_root = debugfs_create_dir(debugfsname, NULL);

	return ptp;

no_pps:
	ptp_cleanup_pin_groups(ptp);
no_pin_groups:
	kfree(ptp->vclock_index);
no_mem_for_vclocks:
	if (ptp->kworker)
		kthread_destroy_worker(ptp->kworker);
kworker_err:
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	bitmap_free(queue->mask);
no_memory_bitmap:
	list_del(&queue->qlist);
	kfree(queue);
no_memory_queue:
	ida_free(&ptp_clocks_map, index);
no_slot:
	kfree(ptp);
no_memory:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);

/* device_for_each_child() helper: tear down one virtual clock child. */
static int unregister_vclock(struct device *dev, void *data)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);

	ptp_vclock_unregister(info_to_vclock(ptp->info));
	return 0;
}

/*
 * Unregister @ptp: remove any virtual clocks stacked on it, mark it
 * defunct so blocked readers wake up and bail out, stop the aux
 * worker, then release the PPS source and the POSIX clock.  The final
 * kfree happens in ptp_clock_release() once the device refcount drops.
 */
int ptp_clock_unregister(struct ptp_clock *ptp)
{
	if (ptp_vclock_in_use(ptp)) {
		device_for_each_child(&ptp->dev, NULL, unregister_vclock);
	}

	ptp->defunct = 1;
	wake_up_interruptible(&ptp->tsev_wq);

	if (ptp->kworker) {
		kthread_cancel_delayed_work_sync(&ptp->aux_work);
		kthread_destroy_worker(ptp->kworker);
	}

	/* Release the clock's resources. */
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);

	posix_clock_unregister(&ptp->clock);

	return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);

/*
 * Driver-facing event delivery.  EXTTS/EXTOFF events are fanned out to
 * every timestamp queue whose channel mask includes the event's index;
 * PPS events are forwarded to the registered PPS source.  ALARM events
 * are currently ignored here.
 */
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
	struct timestamp_event_queue *tsevq;
	struct pps_event_time evt;
	unsigned long flags;

	switch (event->type) {

	case PTP_CLOCK_ALARM:
		break;

	case PTP_CLOCK_EXTTS:
	case PTP_CLOCK_EXTOFF:
		/* Enqueue timestamp on selected queues */
		spin_lock_irqsave(&ptp->tsevqs_lock, flags);
		list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
			if (test_bit((unsigned int)event->index, tsevq->mask))
				enqueue_external_timestamp(tsevq, event);
		}
		spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
		wake_up_interruptible(&ptp->tsev_wq);
		break;

	case PTP_CLOCK_PPS:
		/* Timestamp the PPS edge with system time here. */
		pps_get_ts(&evt);
		pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
		break;

	case PTP_CLOCK_PPSUSR:
		/* Driver supplied its own event timestamps. */
		pps_event(ptp->pps_source, &event->pps_times,
			  PTP_PPS_EVENT, NULL);
		break;
	}
}
EXPORT_SYMBOL(ptp_clock_event);

/* Return the clock's index (the N in /dev/ptpN). */
int ptp_clock_index(struct ptp_clock *ptp)
{
	return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);

/*
 * Find the pin currently programmed with @func on channel @chan.
 * Returns the pin index or -1 if none matches.  Caller must hold
 * pincfg_mux; see ptp_find_pin_unlocked() for the locking variant.
 */
int ptp_find_pin(struct ptp_clock *ptp,
		 enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_pin_desc *pin = NULL;
	int i;

	for (i = 0; i < ptp->info->n_pins; i++) {
		if (ptp->info->pin_config[i].func == func &&
		    ptp->info->pin_config[i].chan == chan) {
			pin = &ptp->info->pin_config[i];
			break;
		}
	}

	return pin ? i : -1;
}
EXPORT_SYMBOL(ptp_find_pin);

/* Locked wrapper around ptp_find_pin() taking pincfg_mux itself. */
int ptp_find_pin_unlocked(struct ptp_clock *ptp,
			  enum ptp_pin_function func, unsigned int chan)
{
	int result;

	mutex_lock(&ptp->pincfg_mux);

	result = ptp_find_pin(ptp, func, chan);

	mutex_unlock(&ptp->pincfg_mux);

	return result;
}
EXPORT_SYMBOL(ptp_find_pin_unlocked);

/* (Re)schedule the clock's aux work to run after @delay jiffies. */
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);

/* Cancel pending aux work and wait for a running instance to finish. */
void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
	kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);

/* module operations */

static void __exit ptp_exit(void)
{
	class_unregister(&ptp_class);
	unregister_chrdev_region(ptp_devt, MINORMASK + 1);
	ida_destroy(&ptp_clocks_map);
}

/*
 * Subsystem init: register the "ptp" class and reserve the character
 * device region used for all clock minors.
 */
static int __init ptp_init(void)
{
	int err;

	err = class_register(&ptp_class);
	if (err) {
		pr_err("ptp: failed to allocate class\n");
		return err;
	}

	err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
	if (err < 0) {
		pr_err("ptp: failed to allocate device region\n");
		goto no_region;
	}

	pr_info("PTP clock support registered\n");
	return 0;

no_region:
	class_unregister(&ptp_class);
	return err;
}

subsys_initcall(ptp_init);
module_exit(ptp_exit);

MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clocks support");
MODULE_LICENSE("GPL");