// SPDX-License-Identifier: GPL-2.0+
/*
 * TI Common Platform Time Sync
 *
 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
 *
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/if.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include "cpts.h"

#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
#define CPTS_SKB_RX_TX_TMO 100 /* ms */
#define CPTS_EVENT_RX_TX_TIMEOUT (100) /* ms */

struct cpts_skb_cb_data {
	u32 skb_mtype_seqid;
	unsigned long tmo;
};

#define cpts_read32(c, r)	readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r)	writel_relaxed(v, &c->reg->r)

static int cpts_event_port(struct cpts_event *event)
{
	return (event->high >> PORT_NUMBER_SHIFT) & PORT_NUMBER_MASK;
}

static int event_expired(struct cpts_event *event)
{
	return time_after(jiffies, event->tmo);
}

static int event_type(struct cpts_event *event)
{
	return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}

static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
{
	u32 r = cpts_read32(cpts, intstat_raw);

	if (r & TS_PEND_RAW) {
		*high = cpts_read32(cpts, event_high);
		*low = cpts_read32(cpts, event_low);
		cpts_write32(cpts, EVENT_POP, event_pop);
		return 0;
	}
	return -1;
}

static int cpts_purge_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	int removed = 0;

	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "cpts: event pool cleaned up %d\n", removed);
	return removed ? 0 : -1;
}

static void cpts_purge_txq(struct cpts *cpts)
{
	struct cpts_skb_cb_data *skb_cb;
	struct sk_buff *skb, *tmp;
	int removed = 0;

	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
		skb_cb = (struct cpts_skb_cb_data *)skb->cb;
		if (time_after(jiffies, skb_cb->tmo)) {
			__skb_unlink(skb, &cpts->txq);
			dev_consume_skb_any(skb);
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
}
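
/* Each hardware event is popped from the FIFO as two 32-bit words:
 * event_low carries the raw counter snapshot, while event_high packs
 * the metadata fields extracted by the helpers above: the port number
 * (PORT_NUMBER_*), the event type (EVENT_TYPE_*) and, for RX/TX packet
 * events, the PTP message type (MESSAGE_TYPE_*) and sequence ID
 * (SEQUENCE_ID_*), per the shift/mask definitions in cpts.h.
 */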

/*
 * Returns zero if matching event type was found.
 */
static int cpts_fifo_read(struct cpts *cpts, int match)
{
	struct ptp_clock_event pevent;
	bool need_schedule = false;
	struct cpts_event *event;
	unsigned long flags;
	int i, type = -1;
	u32 hi, lo;

	spin_lock_irqsave(&cpts->lock, flags);

	for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
		if (cpts_fifo_pop(cpts, &hi, &lo))
			break;

		if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
			dev_warn(cpts->dev, "cpts: event pool empty\n");
			break;
		}

		event = list_first_entry(&cpts->pool, struct cpts_event, list);
		event->high = hi;
		event->low = lo;
		event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
		type = event_type(event);

		dev_dbg(cpts->dev, "CPTS_EV: %d high:%08X low:%08x\n",
			type, event->high, event->low);
		switch (type) {
		case CPTS_EV_PUSH:
			WRITE_ONCE(cpts->cur_timestamp, lo);
			timecounter_read(&cpts->tc);
			if (cpts->mult_new) {
				cpts->cc.mult = cpts->mult_new;
				cpts->mult_new = 0;
			}
			if (!cpts->irq_poll)
				complete(&cpts->ts_push_complete);
			break;
		case CPTS_EV_TX:
		case CPTS_EV_RX:
			event->tmo = jiffies +
				msecs_to_jiffies(CPTS_EVENT_RX_TX_TIMEOUT);

			list_del_init(&event->list);
			list_add_tail(&event->list, &cpts->events);
			need_schedule = true;
			break;
		case CPTS_EV_ROLL:
		case CPTS_EV_HALF:
			break;
		case CPTS_EV_HW:
			pevent.timestamp = event->timestamp;
			pevent.type = PTP_CLOCK_EXTTS;
			pevent.index = cpts_event_port(event) - 1;
			ptp_clock_event(cpts->clock, &pevent);
			break;
		default:
			dev_err(cpts->dev, "cpts: unknown event type\n");
			break;
		}
		if (type == match)
			break;
	}

	spin_unlock_irqrestore(&cpts->lock, flags);

	if (!cpts->irq_poll && need_schedule)
		ptp_schedule_worker(cpts->clock, 0);

	return type == match ? 0 : -1;
}
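
/**
 * cpts_misc_interrupt - CPTS misc interrupt handler
 * @cpts: cpts context
 *
 * Intended to be called by the MAC driver from its misc interrupt
 * handler when the hardware signals pending FIFO events; it drains
 * the event FIFO so TS_PUSH completions and packet timestamps are
 * processed without waiting for the PTP worker to poll.
 */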
void cpts_misc_interrupt(struct cpts *cpts)
{
	cpts_fifo_read(cpts, -1);
}
EXPORT_SYMBOL_GPL(cpts_misc_interrupt);

static u64 cpts_systim_read(const struct cyclecounter *cc)
{
	struct cpts *cpts = container_of(cc, struct cpts, cc);

	return READ_ONCE(cpts->cur_timestamp);
}

static void cpts_update_cur_time(struct cpts *cpts, int match,
				 struct ptp_system_timestamp *sts)
{
	unsigned long flags;

	reinit_completion(&cpts->ts_push_complete);

	/* use spin_lock_irqsave() here as it has to run very fast */
	spin_lock_irqsave(&cpts->lock, flags);
	ptp_read_system_prets(sts);
	cpts_write32(cpts, TS_PUSH, ts_push);
	cpts_read32(cpts, ts_push);
	ptp_read_system_postts(sts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	if (cpts->irq_poll && cpts_fifo_read(cpts, match) && match != -1)
		dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");

	if (!cpts->irq_poll &&
	    !wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
		dev_err(cpts->dev, "cpts: timed out waiting for a time stamp\n");
}

/* PTP clock operations */

static int cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts->mult_new = adjust_by_scaled_ppm(cpts->cc_mult, scaled_ppm);

	cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);

	mutex_unlock(&cpts->ptp_clk_mutex);
	return 0;
}

static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	mutex_lock(&cpts->ptp_clk_mutex);
	timecounter_adjtime(&cpts->tc, delta);
	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_ptp_gettimeex(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	u64 ns;

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts_update_cur_time(cpts, CPTS_EV_PUSH, sts);

	ns = timecounter_read(&cpts->tc);
	mutex_unlock(&cpts->ptp_clk_mutex);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int cpts_ptp_settime(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	u64 ns;

	ns = timespec64_to_ns(ts);

	mutex_lock(&cpts->ptp_clk_mutex);
	timecounter_init(&cpts->tc, &cpts->cc, ns);
	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_extts_enable(struct cpts *cpts, u32 index, int on)
{
	u32 v;

	if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
		return 0;

	mutex_lock(&cpts->ptp_clk_mutex);

	v = cpts_read32(cpts, control);
	if (on) {
		v |= BIT(8 + index);
		cpts->hw_ts_enable |= BIT(index);
	} else {
		v &= ~BIT(8 + index);
		cpts->hw_ts_enable &= ~BIT(index);
	}
	cpts_write32(cpts, v, control);

	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq, int on)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return cpts_extts_enable(cpts, rq->extts.index, on);
	default:
		break;
	}

	return -EOPNOTSUPP;
}
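
/* TX timestamp matching: a TX event and a queued skb belong together
 * when their mtype_seqid keys are equal. Both sides build the same
 * key from the event type plus the PTP message type and sequence ID
 * (sketch of the key layout, per the masks used below):
 *
 *	key = (ev_type & EVENT_TYPE_MASK)   << EVENT_TYPE_SHIFT   |
 *	      (msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT |
 *	      (seqid   & SEQUENCE_ID_MASK)  << SEQUENCE_ID_SHIFT;
 *
 * The txq is spliced to a private list under the queue lock so the
 * walk itself can run without holding that lock.
 */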
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
	struct sk_buff_head txq_list;
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	bool found = false;
	u32 mtype_seqid;

	mtype_seqid = event->high &
		      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
		       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
		       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

	__skb_queue_head_init(&txq_list);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice_init(&cpts->txq, &txq_list);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	skb_queue_walk_safe(&txq_list, skb, tmp) {
		struct skb_shared_hwtstamps ssh;
		struct cpts_skb_cb_data *skb_cb =
					(struct cpts_skb_cb_data *)skb->cb;

		if (mtype_seqid == skb_cb->skb_mtype_seqid) {
			memset(&ssh, 0, sizeof(ssh));
			ssh.hwtstamp = ns_to_ktime(event->timestamp);
			skb_tstamp_tx(skb, &ssh);
			found = true;
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
			dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			break;
		}

		if (time_after(jiffies, skb_cb->tmo)) {
			/* drop any skbs that have waited too long for a timestamp */
			dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
		}
	}

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice(&txq_list, &cpts->txq);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return found;
}

static void cpts_process_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	LIST_HEAD(events_free);
	unsigned long flags;
	LIST_HEAD(events);

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_init(&cpts->events, &events);
	spin_unlock_irqrestore(&cpts->lock, flags);

	list_for_each_safe(this, next, &events) {
		event = list_entry(this, struct cpts_event, list);
		if (cpts_match_tx_ts(cpts, event) ||
		    time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &events_free);
		}
	}

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_tail(&events, &cpts->events);
	list_splice_tail(&events_free, &cpts->pool);
	spin_unlock_irqrestore(&cpts->lock, flags);
}

static long cpts_overflow_check(struct ptp_clock_info *ptp)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	unsigned long delay = cpts->ov_check_period;
	unsigned long flags;
	u64 ns;

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts_update_cur_time(cpts, -1, NULL);
	ns = timecounter_read(&cpts->tc);

	cpts_process_events(cpts);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	if (!skb_queue_empty(&cpts->txq)) {
		cpts_purge_txq(cpts);
		if (!skb_queue_empty(&cpts->txq))
			delay = CPTS_SKB_TX_WORK_TIMEOUT;
	}
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
	mutex_unlock(&cpts->ptp_clk_mutex);
	return (long)delay;
}

static const struct ptp_clock_info cpts_info = {
	.owner		= THIS_MODULE,
	.name		= "CPTS timer",
	.max_adj	= 1000000,
	.n_ext_ts	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfine	= cpts_ptp_adjfine,
	.adjtime	= cpts_ptp_adjtime,
	.gettimex64	= cpts_ptp_gettimeex,
	.settime64	= cpts_ptp_settime,
	.enable		= cpts_ptp_enable,
	.do_aux_work	= cpts_overflow_check,
};
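
/* Build the skb side of the matching key. ptp_classify_raw() expects
 * a valid MAC header and recognizes PTP over L2, UDP/IPv4 and
 * UDP/IPv6; non-PTP frames yield PTP_CLASS_NONE and are skipped.
 * Returns 1 and fills *mtype_seqid on success, 0 otherwise.
 */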
static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
	unsigned int ptp_class = ptp_classify_raw(skb);
	struct ptp_header *hdr;
	u8 msgtype;
	u16 seqid;

	if (ptp_class == PTP_CLASS_NONE)
		return 0;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return 0;

	msgtype = ptp_get_msgtype(hdr, ptp_class);
	seqid = ntohs(hdr->sequence_id);

	*mtype_seqid = (msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
	*mtype_seqid |= (seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;

	return 1;
}

static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
			int ev_type, u32 skb_mtype_seqid)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	unsigned long flags;
	u32 mtype_seqid;
	u64 ns = 0;

	cpts_fifo_read(cpts, -1);
	spin_lock_irqsave(&cpts->lock, flags);
	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			continue;
		}

		mtype_seqid = event->high &
			      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
			       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
			       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

		if (mtype_seqid == skb_mtype_seqid) {
			ns = event->timestamp;
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			break;
		}
	}
	spin_unlock_irqrestore(&cpts->lock, flags);

	return ns;
}
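
/**
 * cpts_rx_timestamp - attach an RX hardware timestamp to an skb
 * @cpts: cpts context
 * @skb: received packet
 *
 * Called by the MAC driver on the receive path, before the skb is
 * handed to the stack. If a matching CPTS_EV_RX event is found, the
 * timestamp is stored in the skb's shared hwtstamps; otherwise the
 * skb is left untouched.
 */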
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
	struct skb_shared_hwtstamps *ssh;
	int ret;
	u64 ns;

	/* cpts_rx_timestamp() is called before eth_type_trans(), so the
	 * skb MAC header is not set up yet; reset it here so the PTP
	 * classifier can parse the frame.
	 */
	skb_reset_mac_header(skb);
	ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;

	skb_cb->skb_mtype_seqid |= (CPTS_EV_RX << EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
		__func__, skb_cb->skb_mtype_seqid);

	ns = cpts_find_ts(cpts, skb, CPTS_EV_RX, skb_cb->skb_mtype_seqid);
	if (!ns)
		return;
	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL_GPL(cpts_rx_timestamp);

void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
	int ret;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;

	skb_cb->skb_mtype_seqid |= (CPTS_EV_TX << EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
		__func__, skb_cb->skb_mtype_seqid);

	/* Always defer TX TS processing to PTP worker */
	skb_get(skb);
	/* record the deadline after which this skb will be dropped */
	skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
	skb_queue_tail(&cpts->txq, skb);
	ptp_schedule_worker(cpts->clock, 0);
}
EXPORT_SYMBOL_GPL(cpts_tx_timestamp);

int cpts_register(struct cpts *cpts)
{
	int err, i;

	skb_queue_head_init(&cpts->txq);
	INIT_LIST_HEAD(&cpts->events);
	INIT_LIST_HEAD(&cpts->pool);
	for (i = 0; i < CPTS_MAX_EVENTS; i++)
		list_add(&cpts->pool_data[i].list, &cpts->pool);

	err = clk_enable(cpts->refclk);
	if (err)
		return err;

	cpts_write32(cpts, CPTS_EN, control);
	cpts_write32(cpts, TS_PEND_EN, int_enable);

	timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns());

	cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
	if (IS_ERR(cpts->clock)) {
		err = PTR_ERR(cpts->clock);
		cpts->clock = NULL;
		goto err_ptp;
	}
	cpts->phc_index = ptp_clock_index(cpts->clock);

	ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
	return 0;

err_ptp:
	clk_disable(cpts->refclk);
	return err;
}
EXPORT_SYMBOL_GPL(cpts_register);

void cpts_unregister(struct cpts *cpts)
{
	if (WARN_ON(!cpts->clock))
		return;

	ptp_clock_unregister(cpts->clock);
	cpts->clock = NULL;
	cpts->phc_index = -1;

	cpts_write32(cpts, 0, int_enable);
	cpts_write32(cpts, 0, control);

	/* Drop all queued packets */
	skb_queue_purge(&cpts->txq);

	clk_disable(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_unregister);
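
/* The cycles-to-ns conversion used by the timecounter is
 *
 *	ns = (cycles * cc.mult) >> cc.shift
 *
 * As an illustration (the frequency is hypothetical): for a 250 MHz
 * reference clock the ideal ratio is 4 ns per cycle, so mult/2^shift
 * must approximate 4, e.g. shift = 28 gives mult ~= 4 << 28. Larger
 * maxsec values force smaller mult values to keep the intermediate
 * product from overflowing 64 bits, which is why the helper below
 * caps maxsec at 10 sec.
 */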
static void cpts_calc_mult_shift(struct cpts *cpts)
{
	u64 frac, maxsec, ns;
	u32 freq;

	freq = clk_get_rate(cpts->refclk);

	/* Calc the maximum number of seconds which we can run before
	 * wrapping around.
	 */
	maxsec = cpts->cc.mask;
	do_div(maxsec, freq);
	/* limit the conversion interval to 10 sec as higher values would
	 * produce too small mult factors and so reduce the conversion
	 * accuracy
	 */
	if (maxsec > 10)
		maxsec = 10;

	/* Calc overflow check period (maxsec / 2) */
	cpts->ov_check_period = (HZ * maxsec) / 2;
	dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
		 cpts->ov_check_period);

	if (cpts->cc.mult || cpts->cc.shift)
		return;

	clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
			       freq, NSEC_PER_SEC, maxsec);

	frac = 0;
	ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);

	dev_info(cpts->dev,
		 "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
		 freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}

static void cpts_clk_unregister(void *clk)
{
	clk_hw_unregister_mux(clk);
}

static void cpts_clk_del_provider(void *np)
{
	of_clk_del_provider(np);
}

static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
{
	struct device_node *refclk_np;
	const char **parent_names;
	unsigned int num_parents;
	struct clk_hw *clk_hw;
	int ret = -EINVAL;
	u32 *mux_table;

	refclk_np = of_get_child_by_name(node, "cpts-refclk-mux");
	if (!refclk_np)
		/* refclk mux selection is not supported on all SoCs */
		return 0;

	num_parents = of_clk_get_parent_count(refclk_np);
	if (num_parents < 1) {
		dev_err(cpts->dev, "mux-clock %s must have parents\n",
			refclk_np->name);
		goto mux_fail;
	}

	parent_names = devm_kcalloc(cpts->dev, num_parents,
				    sizeof(*parent_names), GFP_KERNEL);

	mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table),
				 GFP_KERNEL);
	if (!mux_table || !parent_names) {
		ret = -ENOMEM;
		goto mux_fail;
	}

	of_clk_parent_fill(refclk_np, parent_names, num_parents);

	ret = of_property_read_variable_u32_array(refclk_np, "ti,mux-tbl",
						  mux_table,
						  num_parents, num_parents);
	if (ret < 0)
		goto mux_fail;

	clk_hw = clk_hw_register_mux_table(cpts->dev, refclk_np->name,
					   parent_names, num_parents,
					   0,
					   &cpts->reg->rftclk_sel, 0, 0x1F,
					   0, mux_table, NULL);
	if (IS_ERR(clk_hw)) {
		ret = PTR_ERR(clk_hw);
		goto mux_fail;
	}

	ret = devm_add_action_or_reset(cpts->dev, cpts_clk_unregister, clk_hw);
	if (ret) {
		dev_err(cpts->dev, "add clkmux unreg action %d\n", ret);
		goto mux_fail;
	}

	ret = of_clk_add_hw_provider(refclk_np, of_clk_hw_simple_get, clk_hw);
	if (ret)
		goto mux_fail;

	ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider,
				       refclk_np);
	if (ret) {
		dev_err(cpts->dev, "add clkmux provider unreg action %d\n", ret);
		goto mux_fail;
	}

	return ret;

mux_fail:
	of_node_put(refclk_np);
	return ret;
}

static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
{
	int ret = -EINVAL;
	u32 prop;

	if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
		cpts->cc.mult = prop;

	if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
		cpts->cc.shift = prop;

	if ((cpts->cc.mult && !cpts->cc.shift) ||
	    (!cpts->cc.mult && cpts->cc.shift))
		goto of_error;

	return cpts_of_mux_clk_setup(cpts, node);

of_error:
	dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
	return ret;
}
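
/* Typical lifecycle as seen from a MAC driver (sketch only; error
 * handling omitted and the surrounding "priv" structure is assumed,
 * not part of this file):
 *
 *	priv->cpts = cpts_create(dev, regs, node, 0);	// probe
 *	cpts_register(priv->cpts);			// ndo_open
 *	cpts_tx_timestamp(priv->cpts, skb);		// TX completion
 *	cpts_rx_timestamp(priv->cpts, skb);		// RX, pre eth_type_trans()
 *	cpts_unregister(priv->cpts);			// ndo_stop
 *	cpts_release(priv->cpts);			// remove
 */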
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
			 struct device_node *node, u32 n_ext_ts)
{
	struct cpts *cpts;
	int ret;

	cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
	if (!cpts)
		return ERR_PTR(-ENOMEM);

	cpts->dev = dev;
	cpts->reg = (struct cpsw_cpts __iomem *)regs;
	cpts->irq_poll = true;
	spin_lock_init(&cpts->lock);
	mutex_init(&cpts->ptp_clk_mutex);
	init_completion(&cpts->ts_push_complete);

	ret = cpts_of_parse(cpts, node);
	if (ret)
		return ERR_PTR(ret);

	cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
	if (IS_ERR(cpts->refclk))
		/* try to get the clock from the dev node for compatibility */
		cpts->refclk = devm_clk_get(dev, "cpts");

	if (IS_ERR(cpts->refclk)) {
		dev_err(dev, "Failed to get cpts refclk %ld\n",
			PTR_ERR(cpts->refclk));
		return ERR_CAST(cpts->refclk);
	}

	ret = clk_prepare(cpts->refclk);
	if (ret)
		return ERR_PTR(ret);

	cpts->cc.read = cpts_systim_read;
	cpts->cc.mask = CLOCKSOURCE_MASK(32);
	cpts->info = cpts_info;
	cpts->phc_index = -1;

	if (n_ext_ts)
		cpts->info.n_ext_ts = n_ext_ts;

	cpts_calc_mult_shift(cpts);
	/* save the original cc.mult value as it can be modified
	 * by cpts_ptp_adjfine().
	 */
	cpts->cc_mult = cpts->cc.mult;

	return cpts;
}
EXPORT_SYMBOL_GPL(cpts_create);

void cpts_release(struct cpts *cpts)
{
	if (!cpts)
		return;

	if (WARN_ON(!cpts->refclk))
		return;

	clk_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_release);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI CPTS driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");