// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support - character device implementation.
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
#include <linux/debugfs.h>

#include <linux/nospec.h>

#include "ptp_private.h"

static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
			       enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_clock_request rq;
	int err = 0;

	memset(&rq, 0, sizeof(rq));

	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		rq.type = PTP_CLK_REQ_EXTTS;
		rq.extts.index = chan;
		err = ops->enable(ops, &rq, 0);
		break;
	case PTP_PF_PEROUT:
		rq.type = PTP_CLK_REQ_PEROUT;
		rq.perout.index = chan;
		err = ops->enable(ops, &rq, 0);
		break;
	case PTP_PF_PHYSYNC:
		break;
	default:
		return -EINVAL;
	}

	return err;
}

int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
		    enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_clock_info *info = ptp->info;
	struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
	unsigned int i;

	/* Check to see if any other pin previously had this function. */
	for (i = 0; i < info->n_pins; i++) {
		if (info->pin_config[i].func == func &&
		    info->pin_config[i].chan == chan) {
			pin1 = &info->pin_config[i];
			break;
		}
	}
	if (pin1 && i == pin)
		return 0;

	/* Check the desired function and channel. */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		if (chan >= info->n_ext_ts)
			return -EINVAL;
		break;
	case PTP_PF_PEROUT:
		if (chan >= info->n_per_out)
			return -EINVAL;
		break;
	case PTP_PF_PHYSYNC:
		if (chan != 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (info->verify(info, pin, func, chan)) {
		pr_err("driver cannot use function %u and channel %u on pin %u\n",
		       func, chan, pin);
		return -EOPNOTSUPP;
	}

	/* Disable whatever function was previously assigned. */
	if (pin1) {
		ptp_disable_pinfunc(info, func, chan);
		pin1->func = PTP_PF_NONE;
		pin1->chan = 0;
	}
	ptp_disable_pinfunc(info, pin2->func, pin2->chan);
	pin2->func = func;
	pin2->chan = chan;

	return 0;
}
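
/*
 * Each open of the character device gets its own timestamp event queue:
 * readers see an independent stream of external timestamp events, and the
 * per-queue channel mask is exposed through a debugfs directory named
 * after the queue pointer.
 */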
int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;
	char debugfsname[32];
	unsigned long flags;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -EINVAL;
	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
	if (!queue->mask) {
		kfree(queue);
		return -EINVAL;
	}
	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
	spin_lock_init(&queue->lock);
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	list_add_tail(&queue->qlist, &ptp->tsevqs);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	pccontext->private_clkdata = queue;

	/* Debugfs contents */
	sprintf(debugfsname, "0x%p", queue);
	queue->debugfs_instance =
		debugfs_create_dir(debugfsname, ptp->debugfs_root);
	queue->dfs_bitmap.array = (u32 *)queue->mask;
	queue->dfs_bitmap.n_elements =
		DIV_ROUND_UP(PTP_MAX_CHANNELS, BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("mask", 0444, queue->debugfs_instance,
				 &queue->dfs_bitmap);

	return 0;
}

int ptp_release(struct posix_clock_context *pccontext)
{
	struct timestamp_event_queue *queue = pccontext->private_clkdata;
	unsigned long flags;
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);

	debugfs_remove(queue->debugfs_instance);
	pccontext->private_clkdata = NULL;
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	list_del(&queue->qlist);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	bitmap_free(queue->mask);
	kfree(queue);
	return 0;
}
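
/*
 * Handle the PTP_* ioctls issued on a /dev/ptpN descriptor.  Each command
 * exists in a legacy and a "2" form; where the request carries flags or
 * reserved fields, the "2" variant rejects unknown flag bits and non-zero
 * reserved words, while the legacy variant silently clears them for
 * compatibility.
 *
 * A minimal user space caller might look roughly like this (a sketch
 * only; error handling omitted):
 *
 *	struct ptp_extts_request req = {
 *		.index = 0,
 *		.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
 *	};
 *	int fd = open("/dev/ptp0", O_RDWR);
 *
 *	ioctl(fd, PTP_EXTTS_REQUEST2, &req);
 */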
long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
	       unsigned long arg)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct ptp_sys_offset_extended *extoff = NULL;
	struct ptp_sys_offset_precise precise_offset;
	struct system_device_crosststamp xtstamp;
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_sys_offset *sysoff = NULL;
	struct timestamp_event_queue *tsevq;
	struct ptp_system_timestamp sts;
	struct ptp_clock_request req;
	struct ptp_clock_caps caps;
	struct ptp_clock_time *pct;
	unsigned int i, pin_index;
	struct ptp_pin_desc pd;
	struct timespec64 ts;
	int enable, err = 0;

	tsevq = pccontext->private_clkdata;

	switch (cmd) {

	case PTP_CLOCK_GETCAPS:
	case PTP_CLOCK_GETCAPS2:
		memset(&caps, 0, sizeof(caps));

		caps.max_adj = ptp->info->max_adj;
		caps.n_alarm = ptp->info->n_alarm;
		caps.n_ext_ts = ptp->info->n_ext_ts;
		caps.n_per_out = ptp->info->n_per_out;
		caps.pps = ptp->info->pps;
		caps.n_pins = ptp->info->n_pins;
		caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
		caps.adjust_phase = ptp->info->adjphase != NULL &&
				    ptp->info->getmaxphase != NULL;
		if (caps.adjust_phase)
			caps.max_phase_adj = ptp->info->getmaxphase(ptp->info);
		if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
			err = -EFAULT;
		break;

	case PTP_EXTTS_REQUEST:
	case PTP_EXTTS_REQUEST2:
		memset(&req, 0, sizeof(req));

		if (copy_from_user(&req.extts, (void __user *)arg,
				   sizeof(req.extts))) {
			err = -EFAULT;
			break;
		}
		if (cmd == PTP_EXTTS_REQUEST2) {
			/* Tell the drivers to check the flags carefully. */
			req.extts.flags |= PTP_STRICT_FLAGS;
			/* Make sure no reserved bit is set. */
			if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
			    req.extts.rsv[0] || req.extts.rsv[1]) {
				err = -EINVAL;
				break;
			}
			/* Ensure one of the rising/falling edge bits is set. */
			if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
			    (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
				err = -EINVAL;
				break;
			}
		} else if (cmd == PTP_EXTTS_REQUEST) {
			req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
			req.extts.rsv[0] = 0;
			req.extts.rsv[1] = 0;
		}
		if (req.extts.index >= ops->n_ext_ts) {
			err = -EINVAL;
			break;
		}
		req.type = PTP_CLK_REQ_EXTTS;
		enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	case PTP_PEROUT_REQUEST:
	case PTP_PEROUT_REQUEST2:
		memset(&req, 0, sizeof(req));

		if (copy_from_user(&req.perout, (void __user *)arg,
				   sizeof(req.perout))) {
			err = -EFAULT;
			break;
		}
		if (cmd == PTP_PEROUT_REQUEST2) {
			struct ptp_perout_request *perout = &req.perout;

			if (perout->flags & ~PTP_PEROUT_VALID_FLAGS) {
				err = -EINVAL;
				break;
			}
			/*
			 * The "on" field has undefined meaning if
			 * PTP_PEROUT_DUTY_CYCLE isn't set; we must still
			 * treat it as reserved, which means it must be zero.
			 */
			if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) &&
			    (perout->rsv[0] || perout->rsv[1] ||
			     perout->rsv[2] || perout->rsv[3])) {
				err = -EINVAL;
				break;
			}
			if (perout->flags & PTP_PEROUT_DUTY_CYCLE) {
				/* The "on" time must not exceed the period. */
				if (perout->on.sec > perout->period.sec ||
				    (perout->on.sec == perout->period.sec &&
				     perout->on.nsec > perout->period.nsec)) {
					err = -ERANGE;
					break;
				}
			}
			if (perout->flags & PTP_PEROUT_PHASE) {
				/*
				 * The phase should be specified modulo the
				 * period, therefore anything equal to or
				 * larger than one period is invalid.
				 */
				if (perout->phase.sec > perout->period.sec ||
				    (perout->phase.sec == perout->period.sec &&
				     perout->phase.nsec >= perout->period.nsec)) {
					err = -ERANGE;
					break;
				}
			}
		} else if (cmd == PTP_PEROUT_REQUEST) {
			req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS;
			req.perout.rsv[0] = 0;
			req.perout.rsv[1] = 0;
			req.perout.rsv[2] = 0;
			req.perout.rsv[3] = 0;
		}
		if (req.perout.index >= ops->n_per_out) {
			err = -EINVAL;
			break;
		}
		req.type = PTP_CLK_REQ_PEROUT;
		enable = req.perout.period.sec || req.perout.period.nsec;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;
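
	/*
	 * Enabling PPS event delivery requires CAP_SYS_TIME, since the
	 * resulting PPS events may be used to discipline the system clock.
	 */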
	case PTP_ENABLE_PPS:
	case PTP_ENABLE_PPS2:
		memset(&req, 0, sizeof(req));

		if (!capable(CAP_SYS_TIME))
			return -EPERM;
		req.type = PTP_CLK_REQ_PPS;
		enable = arg ? 1 : 0;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	case PTP_SYS_OFFSET_PRECISE:
	case PTP_SYS_OFFSET_PRECISE2:
		if (!ptp->info->getcrosststamp) {
			err = -EOPNOTSUPP;
			break;
		}
		err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
		if (err)
			break;

		memset(&precise_offset, 0, sizeof(precise_offset));
		ts = ktime_to_timespec64(xtstamp.device);
		precise_offset.device.sec = ts.tv_sec;
		precise_offset.device.nsec = ts.tv_nsec;
		ts = ktime_to_timespec64(xtstamp.sys_realtime);
		precise_offset.sys_realtime.sec = ts.tv_sec;
		precise_offset.sys_realtime.nsec = ts.tv_nsec;
		ts = ktime_to_timespec64(xtstamp.sys_monoraw);
		precise_offset.sys_monoraw.sec = ts.tv_sec;
		precise_offset.sys_monoraw.nsec = ts.tv_nsec;
		if (copy_to_user((void __user *)arg, &precise_offset,
				 sizeof(precise_offset)))
			err = -EFAULT;
		break;

	case PTP_SYS_OFFSET_EXTENDED:
	case PTP_SYS_OFFSET_EXTENDED2:
		if (!ptp->info->gettimex64) {
			err = -EOPNOTSUPP;
			break;
		}
		extoff = memdup_user((void __user *)arg, sizeof(*extoff));
		if (IS_ERR(extoff)) {
			err = PTR_ERR(extoff);
			extoff = NULL;
			break;
		}
		if (extoff->n_samples > PTP_MAX_SAMPLES ||
		    extoff->rsv[0] || extoff->rsv[1] ||
		    (extoff->clockid != CLOCK_REALTIME &&
		     extoff->clockid != CLOCK_MONOTONIC &&
		     extoff->clockid != CLOCK_MONOTONIC_RAW)) {
			err = -EINVAL;
			break;
		}
		sts.clockid = extoff->clockid;
		for (i = 0; i < extoff->n_samples; i++) {
			err = ptp->info->gettimex64(ptp->info, &ts, &sts);
			if (err)
				goto out;
			extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
			extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
			extoff->ts[i][1].sec = ts.tv_sec;
			extoff->ts[i][1].nsec = ts.tv_nsec;
			extoff->ts[i][2].sec = sts.post_ts.tv_sec;
			extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
		}
		if (copy_to_user((void __user *)arg, extoff, sizeof(*extoff)))
			err = -EFAULT;
		break;

	case PTP_SYS_OFFSET:
	case PTP_SYS_OFFSET2:
		sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
		if (IS_ERR(sysoff)) {
			err = PTR_ERR(sysoff);
			sysoff = NULL;
			break;
		}
		if (sysoff->n_samples > PTP_MAX_SAMPLES) {
			err = -EINVAL;
			break;
		}
		pct = &sysoff->ts[0];
		for (i = 0; i < sysoff->n_samples; i++) {
			ktime_get_real_ts64(&ts);
			pct->sec = ts.tv_sec;
			pct->nsec = ts.tv_nsec;
			pct++;
			if (ops->gettimex64)
				err = ops->gettimex64(ops, &ts, NULL);
			else
				err = ops->gettime64(ops, &ts);
			if (err)
				goto out;
			pct->sec = ts.tv_sec;
			pct->nsec = ts.tv_nsec;
			pct++;
		}
		ktime_get_real_ts64(&ts);
		pct->sec = ts.tv_sec;
		pct->nsec = ts.tv_nsec;
		if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
			err = -EFAULT;
		break;
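
	/*
	 * Pin queries and assignments are serialized by pincfg_mux, and the
	 * user supplied pin index is sanitized with array_index_nospec()
	 * before it is used to address pin_config[].
	 */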
	case PTP_PIN_GETFUNC:
	case PTP_PIN_GETFUNC2:
		if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
			err = -EFAULT;
			break;
		}
		if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2] ||
		     pd.rsv[3] || pd.rsv[4]) &&
		    cmd == PTP_PIN_GETFUNC2) {
			err = -EINVAL;
			break;
		} else if (cmd == PTP_PIN_GETFUNC) {
			pd.rsv[0] = 0;
			pd.rsv[1] = 0;
			pd.rsv[2] = 0;
			pd.rsv[3] = 0;
			pd.rsv[4] = 0;
		}
		pin_index = pd.index;
		if (pin_index >= ops->n_pins) {
			err = -EINVAL;
			break;
		}
		pin_index = array_index_nospec(pin_index, ops->n_pins);
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		pd = ops->pin_config[pin_index];
		mutex_unlock(&ptp->pincfg_mux);
		if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
			err = -EFAULT;
		break;

	case PTP_PIN_SETFUNC:
	case PTP_PIN_SETFUNC2:
		if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
			err = -EFAULT;
			break;
		}
		if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2] ||
		     pd.rsv[3] || pd.rsv[4]) &&
		    cmd == PTP_PIN_SETFUNC2) {
			err = -EINVAL;
			break;
		} else if (cmd == PTP_PIN_SETFUNC) {
			pd.rsv[0] = 0;
			pd.rsv[1] = 0;
			pd.rsv[2] = 0;
			pd.rsv[3] = 0;
			pd.rsv[4] = 0;
		}
		pin_index = pd.index;
		if (pin_index >= ops->n_pins) {
			err = -EINVAL;
			break;
		}
		pin_index = array_index_nospec(pin_index, ops->n_pins);
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	case PTP_MASK_CLEAR_ALL:
		bitmap_clear(tsevq->mask, 0, PTP_MAX_CHANNELS);
		break;

	case PTP_MASK_EN_SINGLE:
		if (copy_from_user(&i, (void __user *)arg, sizeof(i))) {
			err = -EFAULT;
			break;
		}
		if (i >= PTP_MAX_CHANNELS) {
			err = -EFAULT;
			break;
		}
		set_bit(i, tsevq->mask);
		break;

	default:
		err = -ENOTTY;
		break;
	}

out:
	kfree(extoff);
	kfree(sysoff);
	return err;
}
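
/*
 * poll() reports a descriptor as readable once its event queue holds at
 * least one external timestamp event; read() blocks until data is queued
 * (or the clock has been unregistered) and then returns whole struct
 * ptp_extts_event records, e.g. from user space (sketch):
 *
 *	struct ptp_extts_event ev;
 *
 *	read(fd, &ev, sizeof(ev));
 */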
__poll_t ptp_poll(struct posix_clock_context *pccontext, struct file *fp,
		  poll_table *wait)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;

	queue = pccontext->private_clkdata;
	if (!queue)
		return EPOLLERR;

	poll_wait(fp, &ptp->tsev_wq, wait);

	return queue_cnt(queue) ? EPOLLIN : 0;
}

#define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))

ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
		 char __user *buf, size_t cnt)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;
	struct ptp_extts_event *event;
	unsigned long flags;
	size_t qcnt, i;
	int result;

	queue = pccontext->private_clkdata;
	if (!queue) {
		result = -EINVAL;
		goto exit;
	}

	if (cnt % sizeof(struct ptp_extts_event) != 0) {
		result = -EINVAL;
		goto exit;
	}

	if (cnt > EXTTS_BUFSIZE)
		cnt = EXTTS_BUFSIZE;

	cnt = cnt / sizeof(struct ptp_extts_event);

	if (wait_event_interruptible(ptp->tsev_wq,
				     ptp->defunct || queue_cnt(queue))) {
		return -ERESTARTSYS;
	}

	if (ptp->defunct) {
		result = -ENODEV;
		goto exit;
	}

	event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
	if (!event) {
		result = -ENOMEM;
		goto exit;
	}

	spin_lock_irqsave(&queue->lock, flags);

	qcnt = queue_cnt(queue);

	if (cnt > qcnt)
		cnt = qcnt;

	for (i = 0; i < cnt; i++) {
		event[i] = queue->buf[queue->head];
		/* Paired with READ_ONCE() in queue_cnt() */
		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
	}

	spin_unlock_irqrestore(&queue->lock, flags);

	cnt = cnt * sizeof(struct ptp_extts_event);

	result = cnt;
	if (copy_to_user(buf, event, cnt)) {
		result = -EFAULT;
		goto free_event;
	}

free_event:
	kfree(event);
exit:
	return result;
}