// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/pps_kernel.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/xarray.h>
#include <uapi/linux/sched/types.h>

#include "ptp_private.h"

#define PTP_MAX_ALARMS 4
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)

const struct class ptp_class = {
        .name = "ptp",
        .dev_groups = ptp_groups
};

/* private globals */

static dev_t ptp_devt;

static DEFINE_XARRAY_ALLOC(ptp_clocks_map);

/* time stamp event queue operations */

static inline int queue_free(struct timestamp_event_queue *q)
{
        return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
}

static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
                                       struct ptp_clock_event *src)
{
        struct ptp_extts_event *dst;
        struct timespec64 offset_ts;
        unsigned long flags;
        s64 seconds;
        u32 remainder;

        if (src->type == PTP_CLOCK_EXTTS) {
                seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
        } else if (src->type == PTP_CLOCK_EXTOFF) {
                offset_ts = ns_to_timespec64(src->offset);
                seconds = offset_ts.tv_sec;
                remainder = offset_ts.tv_nsec;
        } else {
                WARN(1, "%s: unknown type %d\n", __func__, src->type);
                return;
        }

        spin_lock_irqsave(&queue->lock, flags);

        dst = &queue->buf[queue->tail];
        dst->index = src->index;
        dst->flags = PTP_EXTTS_EVENT_VALID;
        dst->t.sec = seconds;
        dst->t.nsec = remainder;
        if (src->type == PTP_CLOCK_EXTOFF)
                dst->flags |= PTP_EXT_OFFSET;

        /* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
        if (!queue_free(queue))
                WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);

        WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);

        spin_unlock_irqrestore(&queue->lock, flags);
}

/* posix clock implementation */

static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
{
        tp->tv_sec = 0;
        tp->tv_nsec = 1;
        return 0;
}

static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
{
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

        if (ptp_clock_freerun(ptp)) {
                pr_err_ratelimited("ptp: physical clock is free running\n");
                return -EBUSY;
        }

        if (!timespec64_valid_settod(tp))
                return -EINVAL;

        return ptp->info->settime64(ptp->info, tp);
}

static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
{
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
        int err;

        if (ptp->info->gettimex64)
                err = ptp->info->gettimex64(ptp->info, tp, NULL);
        else
                err = ptp->info->gettime64(ptp->info, tp);
        return err;
}
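
/*
 * Note on the ADJ_FREQUENCY case below: tx->freq is given in scaled parts
 * per million, i.e. ppm with a 16-bit binary fraction, so the ppb value
 * checked against ops->max_adj is roughly scaled_ppm * 1000 / 2^16.  For
 * example, a request of +1 ppm is passed in as 65536 and converts to
 * 1000 ppb via scaled_ppm_to_ppb().
 */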

static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
        struct ptp_clock_info *ops;
        int err = -EOPNOTSUPP;

        if (tx->modes & (ADJ_SETOFFSET | ADJ_FREQUENCY | ADJ_OFFSET) &&
            ptp_clock_freerun(ptp)) {
                pr_err("ptp: physical clock is free running\n");
                return -EBUSY;
        }

        ops = ptp->info;

        if (tx->modes & ADJ_SETOFFSET) {
                struct timespec64 ts, ts2;
                ktime_t kt;
                s64 delta;

                ts.tv_sec = tx->time.tv_sec;
                ts.tv_nsec = tx->time.tv_usec;

                if (!(tx->modes & ADJ_NANO))
                        ts.tv_nsec *= 1000;

                if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
                        return -EINVAL;

                /* Make sure the offset is valid */
                err = ptp_clock_gettime(pc, &ts2);
                if (err)
                        return err;
                ts2 = timespec64_add(ts2, ts);
                if (!timespec64_valid_settod(&ts2))
                        return -EINVAL;

                kt = timespec64_to_ktime(ts);
                delta = ktime_to_ns(kt);
                err = ops->adjtime(ops, delta);
        } else if (tx->modes & ADJ_FREQUENCY) {
                long ppb = scaled_ppm_to_ppb(tx->freq);
                if (ppb > ops->max_adj || ppb < -ops->max_adj)
                        return -ERANGE;
                err = ops->adjfine(ops, tx->freq);
                if (!err)
                        ptp->dialed_frequency = tx->freq;
        } else if (tx->modes & ADJ_OFFSET) {
                if (ops->adjphase) {
                        s32 max_phase_adj = ops->getmaxphase(ops);
                        s32 offset = tx->offset;

                        if (!(tx->modes & ADJ_NANO))
                                offset *= NSEC_PER_USEC;

                        if (offset > max_phase_adj || offset < -max_phase_adj)
                                return -ERANGE;

                        err = ops->adjphase(ops, offset);
                }
        } else if (tx->modes == 0) {
                tx->freq = ptp->dialed_frequency;
                err = 0;
        }

        return err;
}

static struct posix_clock_operations ptp_clock_ops = {
        .owner = THIS_MODULE,
        .clock_adjtime = ptp_clock_adjtime,
        .clock_gettime = ptp_clock_gettime,
        .clock_getres = ptp_clock_getres,
        .clock_settime = ptp_clock_settime,
        .ioctl = ptp_ioctl,
        .open = ptp_open,
        .release = ptp_release,
        .poll = ptp_poll,
        .read = ptp_read,
};

static void ptp_clock_release(struct device *dev)
{
        struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
        struct timestamp_event_queue *tsevq;
        unsigned long flags;

        ptp_cleanup_pin_groups(ptp);
        kfree(ptp->vclock_index);
        mutex_destroy(&ptp->pincfg_mux);
        mutex_destroy(&ptp->n_vclocks_mux);
        /* Delete first entry */
        spin_lock_irqsave(&ptp->tsevqs_lock, flags);
        tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
                                 qlist);
        list_del(&tsevq->qlist);
        spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
        bitmap_free(tsevq->mask);
        kfree(tsevq);
        debugfs_remove(ptp->debugfs_root);
        xa_erase(&ptp_clocks_map, ptp->index);
        kfree(ptp);
}

static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
{
        if (info->getcyclesx64)
                return info->getcyclesx64(info, ts, NULL);
        else
                return info->gettime64(info, ts);
}

static int ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
{
        return -EOPNOTSUPP;
}

static void ptp_aux_kworker(struct kthread_work *work)
{
        struct ptp_clock *ptp = container_of(work, struct ptp_clock,
                                             aux_work.work);
        struct ptp_clock_info *info = ptp->info;
        long delay;

        delay = info->do_aux_work(info);

        if (delay >= 0)
                kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}

/* public interface */
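
/*
 * Typical usage, sketched with a hypothetical "foo" driver (the callback
 * and field names below are placeholders, not symbols from this file):
 * the driver fills out a struct ptp_clock_info and registers it against
 * its own device.
 *
 *      static struct ptp_clock_info foo_ptp_info = {
 *              .owner     = THIS_MODULE,
 *              .name      = "foo",
 *              .max_adj   = 100000,
 *              .adjfine   = foo_adjfine,
 *              .adjtime   = foo_adjtime,
 *              .gettime64 = foo_gettime64,
 *              .settime64 = foo_settime64,
 *              .enable    = foo_enable,
 *      };
 *
 *      foo->ptp_clock = ptp_clock_register(&foo_ptp_info, &pdev->dev);
 *      if (IS_ERR(foo->ptp_clock))
 *              return PTR_ERR(foo->ptp_clock);
 *
 * The matching teardown call is ptp_clock_unregister().
 */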

struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
                                     struct device *parent)
{
        struct ptp_clock *ptp;
        struct timestamp_event_queue *queue = NULL;
        int err, index, major = MAJOR(ptp_devt);
        char debugfsname[16];
        size_t size;

        if (info->n_alarm > PTP_MAX_ALARMS)
                return ERR_PTR(-EINVAL);

        /* Initialize a clock structure. */
        ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
        if (!ptp) {
                err = -ENOMEM;
                goto no_memory;
        }

        err = xa_alloc(&ptp_clocks_map, &index, ptp, xa_limit_31b,
                       GFP_KERNEL);
        if (err)
                goto no_slot;

        ptp->clock.ops = ptp_clock_ops;
        ptp->info = info;
        ptp->devid = MKDEV(major, index);
        ptp->index = index;
        INIT_LIST_HEAD(&ptp->tsevqs);
        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue) {
                err = -ENOMEM;
                goto no_memory_queue;
        }
        list_add_tail(&queue->qlist, &ptp->tsevqs);
        spin_lock_init(&ptp->tsevqs_lock);
        queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
        if (!queue->mask) {
                err = -ENOMEM;
                goto no_memory_bitmap;
        }
        bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
        spin_lock_init(&queue->lock);
        mutex_init(&ptp->pincfg_mux);
        mutex_init(&ptp->n_vclocks_mux);
        init_waitqueue_head(&ptp->tsev_wq);

        if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
                ptp->has_cycles = true;
                if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
                        ptp->info->getcycles64 = ptp_getcycles64;
        } else {
                /* Free running cycle counter not supported, use time. */
                ptp->info->getcycles64 = ptp_getcycles64;

                if (ptp->info->gettimex64)
                        ptp->info->getcyclesx64 = ptp->info->gettimex64;

                if (ptp->info->getcrosststamp)
                        ptp->info->getcrosscycles = ptp->info->getcrosststamp;
        }

        if (!ptp->info->enable)
                ptp->info->enable = ptp_enable;

        if (ptp->info->do_aux_work) {
                kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
                ptp->kworker = kthread_run_worker(0, "ptp%d", ptp->index);
                if (IS_ERR(ptp->kworker)) {
                        err = PTR_ERR(ptp->kworker);
                        pr_err("failed to create ptp aux_worker %d\n", err);
                        goto kworker_err;
                }
        }

        /* PTP virtual clock is being registered under physical clock */
        if (parent && parent->class && parent->class->name &&
            strcmp(parent->class->name, "ptp") == 0)
                ptp->is_virtual_clock = true;

        if (!ptp->is_virtual_clock) {
                ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

                size = sizeof(int) * ptp->max_vclocks;
                ptp->vclock_index = kzalloc(size, GFP_KERNEL);
                if (!ptp->vclock_index) {
                        err = -ENOMEM;
                        goto no_mem_for_vclocks;
                }
        }

        err = ptp_populate_pin_groups(ptp);
        if (err)
                goto no_pin_groups;

        /* Register a new PPS source. */
        if (info->pps) {
                struct pps_source_info pps;
                memset(&pps, 0, sizeof(pps));
                snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
                pps.mode = PTP_PPS_MODE;
                pps.owner = info->owner;
                ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
                if (IS_ERR(ptp->pps_source)) {
                        err = PTR_ERR(ptp->pps_source);
                        pr_err("failed to register pps source\n");
                        goto no_pps;
                }
                ptp->pps_source->lookup_cookie = ptp;
        }

        /* Initialize a new device of our class in our clock structure. */
        device_initialize(&ptp->dev);
        ptp->dev.devt = ptp->devid;
        ptp->dev.class = &ptp_class;
        ptp->dev.parent = parent;
        ptp->dev.groups = ptp->pin_attr_groups;
        ptp->dev.release = ptp_clock_release;
        dev_set_drvdata(&ptp->dev, ptp);
        dev_set_name(&ptp->dev, "ptp%d", ptp->index);

        /* Create a posix clock and link it to the device. */
        err = posix_clock_register(&ptp->clock, &ptp->dev);
        if (err) {
                if (ptp->pps_source)
                        pps_unregister_source(ptp->pps_source);

                if (ptp->kworker)
                        kthread_destroy_worker(ptp->kworker);

                put_device(&ptp->dev);

                pr_err("failed to create posix clock\n");
                return ERR_PTR(err);
        }

        /* Debugfs initialization */
        snprintf(debugfsname, sizeof(debugfsname), "ptp%d", ptp->index);
        ptp->debugfs_root = debugfs_create_dir(debugfsname, NULL);

        return ptp;

no_pps:
        ptp_cleanup_pin_groups(ptp);
no_pin_groups:
        kfree(ptp->vclock_index);
no_mem_for_vclocks:
        if (ptp->kworker)
                kthread_destroy_worker(ptp->kworker);
kworker_err:
        mutex_destroy(&ptp->pincfg_mux);
        mutex_destroy(&ptp->n_vclocks_mux);
        bitmap_free(queue->mask);
no_memory_bitmap:
        list_del(&queue->qlist);
        kfree(queue);
no_memory_queue:
        xa_erase(&ptp_clocks_map, index);
no_slot:
        kfree(ptp);
no_memory:
        return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);

static int unregister_vclock(struct device *dev, void *data)
{
        struct ptp_clock *ptp = dev_get_drvdata(dev);

        ptp_vclock_unregister(info_to_vclock(ptp->info));
        return 0;
}

int ptp_clock_unregister(struct ptp_clock *ptp)
{
        if (ptp_vclock_in_use(ptp)) {
                device_for_each_child(&ptp->dev, NULL, unregister_vclock);
        }

        ptp->defunct = 1;
        wake_up_interruptible(&ptp->tsev_wq);

        if (ptp->kworker) {
                kthread_cancel_delayed_work_sync(&ptp->aux_work);
                kthread_destroy_worker(ptp->kworker);
        }

        /* Release the clock's resources. */
        if (ptp->pps_source)
                pps_unregister_source(ptp->pps_source);

        posix_clock_unregister(&ptp->clock);

        return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);

void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
        struct timestamp_event_queue *tsevq;
        struct pps_event_time evt;
        unsigned long flags;

        switch (event->type) {

        case PTP_CLOCK_ALARM:
                break;

        case PTP_CLOCK_EXTTS:
        case PTP_CLOCK_EXTOFF:
                /* Enqueue timestamp on selected queues */
                spin_lock_irqsave(&ptp->tsevqs_lock, flags);
                list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
                        if (test_bit((unsigned int)event->index, tsevq->mask))
                                enqueue_external_timestamp(tsevq, event);
                }
                spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
                wake_up_interruptible(&ptp->tsev_wq);
                break;

        case PTP_CLOCK_PPS:
                pps_get_ts(&evt);
                pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
                break;

        case PTP_CLOCK_PPSUSR:
                pps_event(ptp->pps_source, &event->pps_times,
                          PTP_PPS_EVENT, NULL);
                break;
        }
}
EXPORT_SYMBOL(ptp_clock_event);
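
/*
 * A minimal sketch of how a driver feeds ptp_clock_event() (hypothetical
 * names, for illustration only): from its timestamp interrupt handler it
 * fills in a struct ptp_clock_event and passes it up, e.g.
 *
 *      struct ptp_clock_event event;
 *
 *      event.type = PTP_CLOCK_EXTTS;
 *      event.index = channel;
 *      event.timestamp = ns;
 *      ptp_clock_event(foo->ptp_clock, &event);
 *
 * For PTP_CLOCK_PPS only the type needs to be set, since the system
 * timestamp is taken above via pps_get_ts().
 */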

int ptp_clock_index(struct ptp_clock *ptp)
{
        return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);

static int ptp_clock_of_node_match(struct device *dev, const void *data)
{
        const struct device_node *parent_np = data;

        return (dev->parent && dev_of_node(dev->parent) == parent_np);
}

int ptp_clock_index_by_of_node(struct device_node *np)
{
        struct ptp_clock *ptp;
        struct device *dev;
        int phc_index;

        dev = class_find_device(&ptp_class, NULL, np,
                                ptp_clock_of_node_match);
        if (!dev)
                return -1;

        ptp = dev_get_drvdata(dev);
        phc_index = ptp_clock_index(ptp);
        put_device(dev);

        return phc_index;
}
EXPORT_SYMBOL_GPL(ptp_clock_index_by_of_node);

static int ptp_clock_dev_match(struct device *dev, const void *data)
{
        const struct device *parent = data;

        return dev->parent == parent;
}

int ptp_clock_index_by_dev(struct device *parent)
{
        struct ptp_clock *ptp;
        struct device *dev;
        int phc_index;

        dev = class_find_device(&ptp_class, NULL, parent,
                                ptp_clock_dev_match);
        if (!dev)
                return -1;

        ptp = dev_get_drvdata(dev);
        phc_index = ptp_clock_index(ptp);
        put_device(dev);

        return phc_index;
}
EXPORT_SYMBOL_GPL(ptp_clock_index_by_dev);

int ptp_find_pin(struct ptp_clock *ptp,
                 enum ptp_pin_function func, unsigned int chan)
{
        struct ptp_pin_desc *pin = NULL;
        int i;

        for (i = 0; i < ptp->info->n_pins; i++) {
                if (ptp->info->pin_config[i].func == func &&
                    ptp->info->pin_config[i].chan == chan) {
                        pin = &ptp->info->pin_config[i];
                        break;
                }
        }

        return pin ? i : -1;
}
EXPORT_SYMBOL(ptp_find_pin);

int ptp_find_pin_unlocked(struct ptp_clock *ptp,
                          enum ptp_pin_function func, unsigned int chan)
{
        int result;

        mutex_lock(&ptp->pincfg_mux);

        result = ptp_find_pin(ptp, func, chan);

        mutex_unlock(&ptp->pincfg_mux);

        return result;
}
EXPORT_SYMBOL(ptp_find_pin_unlocked);

int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
        return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);

void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
        kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);

/* module operations */

static void __exit ptp_exit(void)
{
        class_unregister(&ptp_class);
        unregister_chrdev_region(ptp_devt, MINORMASK + 1);
        xa_destroy(&ptp_clocks_map);
}

static int __init ptp_init(void)
{
        int err;

        err = class_register(&ptp_class);
        if (err) {
                pr_err("ptp: failed to allocate class\n");
                return err;
        }

        err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
        if (err < 0) {
                pr_err("ptp: failed to allocate device region\n");
                goto no_region;
        }

        pr_info("PTP clock support registered\n");
        return 0;

no_region:
        class_unregister(&ptp_class);
        return err;
}

subsys_initcall(ptp_init);
module_exit(ptp_exit);

MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clocks support");
MODULE_LICENSE("GPL");