// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support - character device implementation.
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
#include <linux/debugfs.h>

#include <linux/nospec.h>

#include "ptp_private.h"

static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
			       enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_clock_request rq;
	int err = 0;

	memset(&rq, 0, sizeof(rq));

	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		rq.type = PTP_CLK_REQ_EXTTS;
		rq.extts.index = chan;
		err = ops->enable(ops, &rq, 0);
		break;
	case PTP_PF_PEROUT:
		rq.type = PTP_CLK_REQ_PEROUT;
		rq.perout.index = chan;
		err = ops->enable(ops, &rq, 0);
		break;
	case PTP_PF_PHYSYNC:
		break;
	default:
		return -EINVAL;
	}

	return err;
}

int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
		    enum ptp_pin_function func, unsigned int chan)
{
	struct ptp_clock_info *info = ptp->info;
	struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
	unsigned int i;

	/* Check to see if any other pin previously had this function. */
	for (i = 0; i < info->n_pins; i++) {
		if (info->pin_config[i].func == func &&
		    info->pin_config[i].chan == chan) {
			pin1 = &info->pin_config[i];
			break;
		}
	}
	if (pin1 && i == pin)
		return 0;

	/* Check the desired function and channel. */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		if (chan >= info->n_ext_ts)
			return -EINVAL;
		break;
	case PTP_PF_PEROUT:
		if (chan >= info->n_per_out)
			return -EINVAL;
		break;
	case PTP_PF_PHYSYNC:
		if (chan != 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (info->verify(info, pin, func, chan)) {
		pr_err("driver cannot use function %u on pin %u\n", func, pin);
		return -EOPNOTSUPP;
	}

	/* Disable whatever function was previously assigned. */
	if (pin1) {
		ptp_disable_pinfunc(info, func, chan);
		pin1->func = PTP_PF_NONE;
		pin1->chan = 0;
	}
	ptp_disable_pinfunc(info, pin2->func, pin2->chan);
	pin2->func = func;
	pin2->chan = chan;

	return 0;
}

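/*
 * Character device open hook. Each opener of /dev/ptpN gets its own
 * timestamp event queue, listening on all channels by default, so that
 * concurrent readers do not consume each other's external timestamp
 * events. The per-queue channel mask is exposed read-only via debugfs.
 */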
int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;
	char debugfsname[32];
	unsigned long flags;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -EINVAL;
	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
	if (!queue->mask) {
		kfree(queue);
		return -EINVAL;
	}
	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
	spin_lock_init(&queue->lock);
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	list_add_tail(&queue->qlist, &ptp->tsevqs);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	pccontext->private_clkdata = queue;

	/* Debugfs contents */
	sprintf(debugfsname, "0x%p", queue);
	queue->debugfs_instance =
		debugfs_create_dir(debugfsname, ptp->debugfs_root);
	queue->dfs_bitmap.array = (u32 *)queue->mask;
	queue->dfs_bitmap.n_elements =
		DIV_ROUND_UP(PTP_MAX_CHANNELS, BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("mask", 0444, queue->debugfs_instance,
				 &queue->dfs_bitmap);

	return 0;
}

int ptp_release(struct posix_clock_context *pccontext)
{
	struct timestamp_event_queue *queue = pccontext->private_clkdata;
	unsigned long flags;
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);

	debugfs_remove(queue->debugfs_instance);
	pccontext->private_clkdata = NULL;
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	list_del(&queue->qlist);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	bitmap_free(queue->mask);
	kfree(queue);
	return 0;
}

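/*
 * Character device ioctl hook, dispatching the PTP_* commands. The newer
 * "2" variants of the requests validate flags and reserved fields
 * strictly, while the original variants mask off anything a v1 caller
 * could not have set. Paths that change hardware state (enable requests
 * and pin configuration) are serialized by pincfg_mux.
 */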
long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
	       unsigned long arg)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct ptp_sys_offset_extended *extoff = NULL;
	struct ptp_sys_offset_precise precise_offset;
	struct system_device_crosststamp xtstamp;
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_sys_offset *sysoff = NULL;
	struct timestamp_event_queue *tsevq;
	struct ptp_system_timestamp sts;
	struct ptp_clock_request req;
	struct ptp_clock_caps caps;
	struct ptp_clock_time *pct;
	unsigned int i, pin_index;
	struct ptp_pin_desc pd;
	struct timespec64 ts;
	int enable, err = 0;

	tsevq = pccontext->private_clkdata;

	switch (cmd) {

	case PTP_CLOCK_GETCAPS:
	case PTP_CLOCK_GETCAPS2:
		memset(&caps, 0, sizeof(caps));

		caps.max_adj = ptp->info->max_adj;
		caps.n_alarm = ptp->info->n_alarm;
		caps.n_ext_ts = ptp->info->n_ext_ts;
		caps.n_per_out = ptp->info->n_per_out;
		caps.pps = ptp->info->pps;
		caps.n_pins = ptp->info->n_pins;
		caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
		caps.adjust_phase = ptp->info->adjphase != NULL &&
				    ptp->info->getmaxphase != NULL;
		if (caps.adjust_phase)
			caps.max_phase_adj = ptp->info->getmaxphase(ptp->info);
		if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
			err = -EFAULT;
		break;

	case PTP_EXTTS_REQUEST:
	case PTP_EXTTS_REQUEST2:
		memset(&req, 0, sizeof(req));

		if (copy_from_user(&req.extts, (void __user *)arg,
				   sizeof(req.extts))) {
			err = -EFAULT;
			break;
		}
		if (cmd == PTP_EXTTS_REQUEST2) {
			/* Tell the drivers to check the flags carefully. */
			req.extts.flags |= PTP_STRICT_FLAGS;
			/* Make sure no reserved bit is set. */
			if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
			    req.extts.rsv[0] || req.extts.rsv[1]) {
				err = -EINVAL;
				break;
			}
			/* Ensure one of the rising/falling edge bits is set. */
			if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
			    (req.extts.flags & PTP_EXTTS_EDGES) == 0) {
				err = -EINVAL;
				break;
			}
		} else if (cmd == PTP_EXTTS_REQUEST) {
			req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
			req.extts.rsv[0] = 0;
			req.extts.rsv[1] = 0;
		}
		if (req.extts.index >= ops->n_ext_ts) {
			err = -EINVAL;
			break;
		}
		req.type = PTP_CLK_REQ_EXTTS;
		enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	case PTP_PEROUT_REQUEST:
	case PTP_PEROUT_REQUEST2:
		memset(&req, 0, sizeof(req));

		if (copy_from_user(&req.perout, (void __user *)arg,
				   sizeof(req.perout))) {
			err = -EFAULT;
			break;
		}
		if (cmd == PTP_PEROUT_REQUEST2) {
			struct ptp_perout_request *perout = &req.perout;

			if (perout->flags & ~PTP_PEROUT_VALID_FLAGS) {
				err = -EINVAL;
				break;
			}
			/*
			 * The "on" field has undefined meaning if
			 * PTP_PEROUT_DUTY_CYCLE isn't set; we still treat it
			 * as reserved, so it must be zero.
			 */
			if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) &&
			    (perout->rsv[0] || perout->rsv[1] ||
			     perout->rsv[2] || perout->rsv[3])) {
				err = -EINVAL;
				break;
			}
			if (perout->flags & PTP_PEROUT_DUTY_CYCLE) {
				/* The on time must not exceed the period. */
				if (perout->on.sec > perout->period.sec ||
				    (perout->on.sec == perout->period.sec &&
				     perout->on.nsec > perout->period.nsec)) {
					err = -ERANGE;
					break;
				}
			}
			if (perout->flags & PTP_PEROUT_PHASE) {
				/*
				 * The phase should be specified modulo the
				 * period, therefore anything equal or larger
				 * than 1 period is invalid.
				 */
				if (perout->phase.sec > perout->period.sec ||
				    (perout->phase.sec == perout->period.sec &&
				     perout->phase.nsec >= perout->period.nsec)) {
					err = -ERANGE;
					break;
				}
			}
		} else if (cmd == PTP_PEROUT_REQUEST) {
			req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS;
			req.perout.rsv[0] = 0;
			req.perout.rsv[1] = 0;
			req.perout.rsv[2] = 0;
			req.perout.rsv[3] = 0;
		}
		if (req.perout.index >= ops->n_per_out) {
			err = -EINVAL;
			break;
		}
		req.type = PTP_CLK_REQ_PEROUT;
		enable = req.perout.period.sec || req.perout.period.nsec;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	case PTP_ENABLE_PPS:
	case PTP_ENABLE_PPS2:
		memset(&req, 0, sizeof(req));

		if (!capable(CAP_SYS_TIME))
			return -EPERM;
		req.type = PTP_CLK_REQ_PPS;
		enable = arg ? 1 : 0;
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ops->enable(ops, &req, enable);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	case PTP_SYS_OFFSET_PRECISE:
	case PTP_SYS_OFFSET_PRECISE2:
		if (!ptp->info->getcrosststamp) {
			err = -EOPNOTSUPP;
			break;
		}
		err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
		if (err)
			break;

		memset(&precise_offset, 0, sizeof(precise_offset));
		ts = ktime_to_timespec64(xtstamp.device);
		precise_offset.device.sec = ts.tv_sec;
		precise_offset.device.nsec = ts.tv_nsec;
		ts = ktime_to_timespec64(xtstamp.sys_realtime);
		precise_offset.sys_realtime.sec = ts.tv_sec;
		precise_offset.sys_realtime.nsec = ts.tv_nsec;
		ts = ktime_to_timespec64(xtstamp.sys_monoraw);
		precise_offset.sys_monoraw.sec = ts.tv_sec;
		precise_offset.sys_monoraw.nsec = ts.tv_nsec;
		if (copy_to_user((void __user *)arg, &precise_offset,
				 sizeof(precise_offset)))
			err = -EFAULT;
		break;

	case PTP_SYS_OFFSET_EXTENDED:
	case PTP_SYS_OFFSET_EXTENDED2:
		if (!ptp->info->gettimex64) {
			err = -EOPNOTSUPP;
			break;
		}
		extoff = memdup_user((void __user *)arg, sizeof(*extoff));
		if (IS_ERR(extoff)) {
			err = PTR_ERR(extoff);
			extoff = NULL;
			break;
		}
		if (extoff->n_samples > PTP_MAX_SAMPLES
		    || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
			err = -EINVAL;
			break;
		}
		for (i = 0; i < extoff->n_samples; i++) {
			err = ptp->info->gettimex64(ptp->info, &ts, &sts);
			if (err)
				goto out;
			extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
			extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
			extoff->ts[i][1].sec = ts.tv_sec;
			extoff->ts[i][1].nsec = ts.tv_nsec;
			extoff->ts[i][2].sec = sts.post_ts.tv_sec;
			extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
		}
		if (copy_to_user((void __user *)arg, extoff, sizeof(*extoff)))
			err = -EFAULT;
		break;

	case PTP_SYS_OFFSET:
	case PTP_SYS_OFFSET2:
		sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
		if (IS_ERR(sysoff)) {
			err = PTR_ERR(sysoff);
			sysoff = NULL;
			break;
		}
		if (sysoff->n_samples > PTP_MAX_SAMPLES) {
			err = -EINVAL;
			break;
		}
		pct = &sysoff->ts[0];
		for (i = 0; i < sysoff->n_samples; i++) {
			ktime_get_real_ts64(&ts);
			pct->sec = ts.tv_sec;
			pct->nsec = ts.tv_nsec;
			pct++;
			if (ops->gettimex64)
				err = ops->gettimex64(ops, &ts, NULL);
			else
				err = ops->gettime64(ops, &ts);
			if (err)
				goto out;
			pct->sec = ts.tv_sec;
			pct->nsec = ts.tv_nsec;
			pct++;
		}
		ktime_get_real_ts64(&ts);
		pct->sec = ts.tv_sec;
		pct->nsec = ts.tv_nsec;
		if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
			err = -EFAULT;
		break;

	case PTP_PIN_GETFUNC:
	case PTP_PIN_GETFUNC2:
		if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
			err = -EFAULT;
			break;
		}
		if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2]
				|| pd.rsv[3] || pd.rsv[4])
			&& cmd == PTP_PIN_GETFUNC2) {
			err = -EINVAL;
			break;
		} else if (cmd == PTP_PIN_GETFUNC) {
			pd.rsv[0] = 0;
			pd.rsv[1] = 0;
			pd.rsv[2] = 0;
			pd.rsv[3] = 0;
			pd.rsv[4] = 0;
		}
		pin_index = pd.index;
		if (pin_index >= ops->n_pins) {
			err = -EINVAL;
			break;
		}
		pin_index = array_index_nospec(pin_index, ops->n_pins);
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		pd = ops->pin_config[pin_index];
		mutex_unlock(&ptp->pincfg_mux);
		if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
			err = -EFAULT;
		break;

	case PTP_PIN_SETFUNC:
	case PTP_PIN_SETFUNC2:
		if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
			err = -EFAULT;
			break;
		}
		if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2]
				|| pd.rsv[3] || pd.rsv[4])
			&& cmd == PTP_PIN_SETFUNC2) {
			err = -EINVAL;
			break;
		} else if (cmd == PTP_PIN_SETFUNC) {
			pd.rsv[0] = 0;
			pd.rsv[1] = 0;
			pd.rsv[2] = 0;
			pd.rsv[3] = 0;
			pd.rsv[4] = 0;
		}
		pin_index = pd.index;
		if (pin_index >= ops->n_pins) {
			err = -EINVAL;
			break;
		}
		pin_index = array_index_nospec(pin_index, ops->n_pins);
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	case PTP_MASK_CLEAR_ALL:
		bitmap_clear(tsevq->mask, 0, PTP_MAX_CHANNELS);
		break;

	case PTP_MASK_EN_SINGLE:
		if (copy_from_user(&i, (void __user *)arg, sizeof(i))) {
			err = -EFAULT;
			break;
		}
		if (i >= PTP_MAX_CHANNELS) {
			err = -EFAULT;
			break;
		}
		set_bit(i, tsevq->mask);
		break;

	default:
		err = -ENOTTY;
		break;
	}

out:
	kfree(extoff);
	kfree(sysoff);
	return err;
}

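/*
 * Poll support: a reader becomes readable as soon as its private event
 * queue holds at least one external timestamp event.
 */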
__poll_t ptp_poll(struct posix_clock_context *pccontext, struct file *fp,
		  poll_table *wait)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;

	queue = pccontext->private_clkdata;
	if (!queue)
		return EPOLLERR;

	poll_wait(fp, &ptp->tsev_wq, wait);

	return queue_cnt(queue) ? EPOLLIN : 0;
}

#define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))

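/*
 * Read support: blocks until the clock goes away or the caller's queue
 * holds events, then copies out as many whole ptp_extts_event records as
 * fit in the supplied buffer, at most EXTTS_BUFSIZE bytes per call.
 */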
ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
		 char __user *buf, size_t cnt)
{
	struct ptp_clock *ptp =
		container_of(pccontext->clk, struct ptp_clock, clock);
	struct timestamp_event_queue *queue;
	struct ptp_extts_event *event;
	unsigned long flags;
	size_t qcnt, i;
	int result;

	queue = pccontext->private_clkdata;
	if (!queue) {
		result = -EINVAL;
		goto exit;
	}

	if (cnt % sizeof(struct ptp_extts_event) != 0) {
		result = -EINVAL;
		goto exit;
	}

	if (cnt > EXTTS_BUFSIZE)
		cnt = EXTTS_BUFSIZE;

	cnt = cnt / sizeof(struct ptp_extts_event);

	if (wait_event_interruptible(ptp->tsev_wq,
				     ptp->defunct || queue_cnt(queue))) {
		return -ERESTARTSYS;
	}

	if (ptp->defunct) {
		result = -ENODEV;
		goto exit;
	}

	event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
	if (!event) {
		result = -ENOMEM;
		goto exit;
	}

	spin_lock_irqsave(&queue->lock, flags);

	qcnt = queue_cnt(queue);

	if (cnt > qcnt)
		cnt = qcnt;

	for (i = 0; i < cnt; i++) {
		event[i] = queue->buf[queue->head];
		/* Paired with READ_ONCE() in queue_cnt() */
		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
	}

	spin_unlock_irqrestore(&queue->lock, flags);

	cnt = cnt * sizeof(struct ptp_extts_event);

	result = cnt;
	if (copy_to_user(buf, event, cnt)) {
		result = -EFAULT;
		goto free_event;
	}

free_event:
	kfree(event);
exit:
	return result;
}