1 // SPDX-License-Identifier: GPL-2.0 2 /* TI K3 AM65x Common Platform Time Sync 3 * 4 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com 5 * 6 */ 7 8 #include <linux/clk.h> 9 #include <linux/clk-provider.h> 10 #include <linux/err.h> 11 #include <linux/if_vlan.h> 12 #include <linux/interrupt.h> 13 #include <linux/module.h> 14 #include <linux/netdevice.h> 15 #include <linux/net_tstamp.h> 16 #include <linux/of.h> 17 #include <linux/of_irq.h> 18 #include <linux/platform_device.h> 19 #include <linux/pm_runtime.h> 20 #include <linux/ptp_classify.h> 21 #include <linux/ptp_clock_kernel.h> 22 23 #include "am65-cpts.h" 24 25 struct am65_genf_regs { 26 u32 comp_lo; /* Comparison Low Value 0:31 */ 27 u32 comp_hi; /* Comparison High Value 32:63 */ 28 u32 control; /* control */ 29 u32 length; /* Length */ 30 u32 ppm_low; /* PPM Load Low Value 0:31 */ 31 u32 ppm_hi; /* PPM Load High Value 32:63 */ 32 u32 ts_nudge; /* Nudge value */ 33 } __aligned(32) __packed; 34 35 #define AM65_CPTS_GENF_MAX_NUM 9 36 #define AM65_CPTS_ESTF_MAX_NUM 8 37 38 struct am65_cpts_regs { 39 u32 idver; /* Identification and version */ 40 u32 control; /* Time sync control */ 41 u32 rftclk_sel; /* Reference Clock Select Register */ 42 u32 ts_push; /* Time stamp event push */ 43 u32 ts_load_val_lo; /* Time Stamp Load Low Value 0:31 */ 44 u32 ts_load_en; /* Time stamp load enable */ 45 u32 ts_comp_lo; /* Time Stamp Comparison Low Value 0:31 */ 46 u32 ts_comp_length; /* Time Stamp Comparison Length */ 47 u32 intstat_raw; /* Time sync interrupt status raw */ 48 u32 intstat_masked; /* Time sync interrupt status masked */ 49 u32 int_enable; /* Time sync interrupt enable */ 50 u32 ts_comp_nudge; /* Time Stamp Comparison Nudge Value */ 51 u32 event_pop; /* Event interrupt pop */ 52 u32 event_0; /* Event Time Stamp lo 0:31 */ 53 u32 event_1; /* Event Type Fields */ 54 u32 event_2; /* Event Type Fields domain */ 55 u32 event_3; /* Event Time Stamp hi 32:63 */ 56 u32 ts_load_val_hi; /* Time Stamp Load High Value 32:63 */ 57 u32 ts_comp_hi; /* Time Stamp Comparison High Value 32:63 */ 58 u32 ts_add_val; /* Time Stamp Add value */ 59 u32 ts_ppm_low; /* Time Stamp PPM Load Low Value 0:31 */ 60 u32 ts_ppm_hi; /* Time Stamp PPM Load High Value 32:63 */ 61 u32 ts_nudge; /* Time Stamp Nudge value */ 62 u32 reserv[33]; 63 struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM]; 64 struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM]; 65 }; 66 67 /* CONTROL_REG */ 68 #define AM65_CPTS_CONTROL_EN BIT(0) 69 #define AM65_CPTS_CONTROL_INT_TEST BIT(1) 70 #define AM65_CPTS_CONTROL_TS_COMP_POLARITY BIT(2) 71 #define AM65_CPTS_CONTROL_TSTAMP_EN BIT(3) 72 #define AM65_CPTS_CONTROL_SEQUENCE_EN BIT(4) 73 #define AM65_CPTS_CONTROL_64MODE BIT(5) 74 #define AM65_CPTS_CONTROL_TS_COMP_TOG BIT(6) 75 #define AM65_CPTS_CONTROL_TS_PPM_DIR BIT(7) 76 #define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN BIT(8) 77 #define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN BIT(9) 78 #define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN BIT(10) 79 #define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN BIT(11) 80 #define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN BIT(12) 81 #define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN BIT(13) 82 #define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN BIT(14) 83 #define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15) 84 #define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8) 85 86 #define AM65_CPTS_CONTROL_TX_GENF_CLR_EN BIT(17) 87 88 #define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF) 89 #define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28) 90 91 /* RFTCLK_SEL_REG */ 92 #define AM65_CPTS_RFTCLK_SEL_MASK (0x1F) 93 94 /* TS_PUSH_REG */ 95 #define AM65_CPTS_TS_PUSH BIT(0) 96 97 /* TS_LOAD_EN_REG */ 98 #define AM65_CPTS_TS_LOAD_EN BIT(0) 99 100 /* INTSTAT_RAW_REG */ 101 #define AM65_CPTS_INTSTAT_RAW_TS_PEND BIT(0) 102 103 /* INTSTAT_MASKED_REG */ 104 #define AM65_CPTS_INTSTAT_MASKED_TS_PEND BIT(0) 105 106 /* INT_ENABLE_REG */ 107 #define AM65_CPTS_INT_ENABLE_TS_PEND_EN BIT(0) 108 109 /* TS_COMP_NUDGE_REG */ 110 #define AM65_CPTS_TS_COMP_NUDGE_MASK (0xFF) 111 112 /* EVENT_POP_REG */ 113 #define AM65_CPTS_EVENT_POP BIT(0) 114 115 /* EVENT_1_REG */ 116 #define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK GENMASK(15, 0) 117 118 #define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK GENMASK(19, 16) 119 #define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT (16) 120 121 #define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK GENMASK(23, 20) 122 #define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT (20) 123 124 #define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK GENMASK(28, 24) 125 #define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT (24) 126 127 /* EVENT_2_REG */ 128 #define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK (0xFF) 129 #define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT (0) 130 131 enum { 132 AM65_CPTS_EV_PUSH, /* Time Stamp Push Event */ 133 AM65_CPTS_EV_ROLL, /* Time Stamp Rollover Event */ 134 AM65_CPTS_EV_HALF, /* Time Stamp Half Rollover Event */ 135 AM65_CPTS_EV_HW, /* Hardware Time Stamp Push Event */ 136 AM65_CPTS_EV_RX, /* Ethernet Receive Event */ 137 AM65_CPTS_EV_TX, /* Ethernet Transmit Event */ 138 AM65_CPTS_EV_TS_COMP, /* Time Stamp Compare Event */ 139 AM65_CPTS_EV_HOST, /* Host Transmit Event */ 140 }; 141 142 struct am65_cpts_event { 143 struct list_head list; 144 unsigned long tmo; 145 u32 event1; 146 u32 event2; 147 u64 timestamp; 148 }; 149 150 #define AM65_CPTS_FIFO_DEPTH (16) 151 #define AM65_CPTS_MAX_EVENTS (32) 152 #define AM65_CPTS_EVENT_RX_TX_TIMEOUT (20) /* ms */ 153 #define AM65_CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */ 154 #define AM65_CPTS_MIN_PPM 0x400 155 156 struct am65_cpts { 157 struct device *dev; 158 struct am65_cpts_regs __iomem *reg; 159 struct ptp_clock_info ptp_info; 160 struct ptp_clock *ptp_clock; 161 int phc_index; 162 struct clk_hw *clk_mux_hw; 163 struct device_node *clk_mux_np; 164 struct clk *refclk; 165 u32 refclk_freq; 166 struct list_head events; 167 struct list_head pool; 168 struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS]; 169 spinlock_t lock; /* protects events lists*/ 170 u32 ext_ts_inputs; 171 u32 genf_num; 172 u32 ts_add_val; 173 int irq; 174 struct mutex ptp_clk_lock; /* PHC access sync */ 175 u64 timestamp; 176 u32 genf_enable; 177 u32 hw_ts_enable; 178 u32 estf_enable; 179 struct sk_buff_head txq; 180 bool pps_enabled; 181 bool pps_present; 182 u32 pps_hw_ts_idx; 183 u32 pps_genf_idx; 184 /* context save/restore */ 185 u64 sr_cpts_ns; 186 u64 sr_ktime_ns; 187 u32 sr_control; 188 u32 sr_int_enable; 189 u32 sr_rftclk_sel; 190 u32 sr_ts_ppm_hi; 191 u32 sr_ts_ppm_low; 192 struct am65_genf_regs sr_genf[AM65_CPTS_GENF_MAX_NUM]; 193 struct am65_genf_regs sr_estf[AM65_CPTS_ESTF_MAX_NUM]; 194 }; 195 196 struct am65_cpts_skb_cb_data { 197 unsigned long tmo; 198 u32 skb_mtype_seqid; 199 }; 200 201 #define am65_cpts_write32(c, v, r) writel(v, &(c)->reg->r) 202 #define am65_cpts_read32(c, r) readl(&(c)->reg->r) 203 204 static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp) 205 { 206 u32 val; 207 208 val = upper_32_bits(start_tstamp); 209 am65_cpts_write32(cpts, val, ts_load_val_hi); 210 val = lower_32_bits(start_tstamp); 211 am65_cpts_write32(cpts, val, ts_load_val_lo); 212 213 am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en); 214 } 215 216 static void am65_cpts_set_add_val(struct am65_cpts *cpts) 217 { 218 /* select coefficient according to the rate */ 219 cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7; 220 221 am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val); 222 } 223 224 static void am65_cpts_disable(struct am65_cpts *cpts) 225 { 226 am65_cpts_write32(cpts, 0, control); 227 am65_cpts_write32(cpts, 0, int_enable); 228 } 229 230 static int am65_cpts_event_get_port(struct am65_cpts_event *event) 231 { 232 return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >> 233 AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT; 234 } 235 236 static int am65_cpts_event_get_type(struct am65_cpts_event *event) 237 { 238 return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >> 239 AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT; 240 } 241 242 static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts) 243 { 244 struct list_head *this, *next; 245 struct am65_cpts_event *event; 246 int removed = 0; 247 248 list_for_each_safe(this, next, &cpts->events) { 249 event = list_entry(this, struct am65_cpts_event, list); 250 if (time_after(jiffies, event->tmo)) { 251 list_del_init(&event->list); 252 list_add(&event->list, &cpts->pool); 253 ++removed; 254 } 255 } 256 257 if (removed) 258 dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed); 259 return removed ? 0 : -1; 260 } 261 262 static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts, 263 struct am65_cpts_event *event) 264 { 265 u32 r = am65_cpts_read32(cpts, intstat_raw); 266 267 if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) { 268 event->timestamp = am65_cpts_read32(cpts, event_0); 269 event->event1 = am65_cpts_read32(cpts, event_1); 270 event->event2 = am65_cpts_read32(cpts, event_2); 271 event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32; 272 am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop); 273 return false; 274 } 275 return true; 276 } 277 278 static int __am65_cpts_fifo_read(struct am65_cpts *cpts) 279 { 280 struct ptp_clock_event pevent; 281 struct am65_cpts_event *event; 282 bool schedule = false; 283 int i, type, ret = 0; 284 285 for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) { 286 event = list_first_entry_or_null(&cpts->pool, 287 struct am65_cpts_event, list); 288 289 if (!event) { 290 if (am65_cpts_cpts_purge_events(cpts)) { 291 dev_err(cpts->dev, "cpts: event pool empty\n"); 292 ret = -1; 293 goto out; 294 } 295 continue; 296 } 297 298 if (am65_cpts_fifo_pop_event(cpts, event)) 299 break; 300 301 type = am65_cpts_event_get_type(event); 302 switch (type) { 303 case AM65_CPTS_EV_PUSH: 304 cpts->timestamp = event->timestamp; 305 dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n", 306 cpts->timestamp); 307 break; 308 case AM65_CPTS_EV_RX: 309 case AM65_CPTS_EV_TX: 310 event->tmo = jiffies + 311 msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT); 312 313 list_move_tail(&event->list, &cpts->events); 314 315 dev_dbg(cpts->dev, 316 "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n", 317 event->event1, event->event2, 318 event->timestamp); 319 schedule = true; 320 break; 321 case AM65_CPTS_EV_HW: 322 pevent.index = am65_cpts_event_get_port(event) - 1; 323 pevent.timestamp = event->timestamp; 324 if (cpts->pps_enabled && pevent.index == cpts->pps_hw_ts_idx) { 325 pevent.type = PTP_CLOCK_PPSUSR; 326 pevent.pps_times.ts_real = ns_to_timespec64(pevent.timestamp); 327 } else { 328 pevent.type = PTP_CLOCK_EXTTS; 329 } 330 dev_dbg(cpts->dev, "AM65_CPTS_EV_HW:%s p:%d t:%llu\n", 331 pevent.type == PTP_CLOCK_EXTTS ? 332 "extts" : "pps", 333 pevent.index, event->timestamp); 334 335 ptp_clock_event(cpts->ptp_clock, &pevent); 336 break; 337 case AM65_CPTS_EV_HOST: 338 break; 339 case AM65_CPTS_EV_ROLL: 340 case AM65_CPTS_EV_HALF: 341 case AM65_CPTS_EV_TS_COMP: 342 dev_dbg(cpts->dev, 343 "AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n", 344 type, 345 event->event1, event->event2, 346 event->timestamp); 347 break; 348 default: 349 dev_err(cpts->dev, "cpts: unknown event type\n"); 350 ret = -1; 351 goto out; 352 } 353 } 354 355 out: 356 if (schedule) 357 ptp_schedule_worker(cpts->ptp_clock, 0); 358 359 return ret; 360 } 361 362 static int am65_cpts_fifo_read(struct am65_cpts *cpts) 363 { 364 unsigned long flags; 365 int ret = 0; 366 367 spin_lock_irqsave(&cpts->lock, flags); 368 ret = __am65_cpts_fifo_read(cpts); 369 spin_unlock_irqrestore(&cpts->lock, flags); 370 371 return ret; 372 } 373 374 static u64 am65_cpts_gettime(struct am65_cpts *cpts, 375 struct ptp_system_timestamp *sts) 376 { 377 unsigned long flags; 378 u64 val = 0; 379 380 /* temporarily disable cpts interrupt to avoid intentional 381 * doubled read. Interrupt can be in-flight - it's Ok. 382 */ 383 am65_cpts_write32(cpts, 0, int_enable); 384 385 /* use spin_lock_irqsave() here as it has to run very fast */ 386 spin_lock_irqsave(&cpts->lock, flags); 387 ptp_read_system_prets(sts); 388 am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push); 389 am65_cpts_read32(cpts, ts_push); 390 ptp_read_system_postts(sts); 391 spin_unlock_irqrestore(&cpts->lock, flags); 392 393 am65_cpts_fifo_read(cpts); 394 395 am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable); 396 397 val = cpts->timestamp; 398 399 return val; 400 } 401 402 static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id) 403 { 404 struct am65_cpts *cpts = dev_id; 405 406 if (am65_cpts_fifo_read(cpts)) 407 dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n"); 408 409 return IRQ_HANDLED; 410 } 411 412 /* PTP clock operations */ 413 static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) 414 { 415 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info); 416 u32 estf_ctrl_val = 0, estf_ppm_hi = 0, estf_ppm_low = 0; 417 s32 ppb = scaled_ppm_to_ppb(scaled_ppm); 418 int pps_index = cpts->pps_genf_idx; 419 u64 adj_period, pps_adj_period; 420 u32 ctrl_val, ppm_hi, ppm_low; 421 unsigned long flags; 422 int neg_adj = 0, i; 423 424 if (ppb < 0) { 425 neg_adj = 1; 426 ppb = -ppb; 427 } 428 429 /* base freq = 1GHz = 1 000 000 000 430 * ppb_norm = ppb * base_freq / clock_freq; 431 * ppm_norm = ppb_norm / 1000 432 * adj_period = 1 000 000 / ppm_norm 433 * adj_period = 1 000 000 000 / ppb_norm 434 * adj_period = 1 000 000 000 / (ppb * base_freq / clock_freq) 435 * adj_period = (1 000 000 000 * clock_freq) / (ppb * base_freq) 436 * adj_period = clock_freq / ppb 437 */ 438 adj_period = div_u64(cpts->refclk_freq, ppb); 439 440 mutex_lock(&cpts->ptp_clk_lock); 441 442 ctrl_val = am65_cpts_read32(cpts, control); 443 if (neg_adj) 444 ctrl_val |= AM65_CPTS_CONTROL_TS_PPM_DIR; 445 else 446 ctrl_val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR; 447 448 ppm_hi = upper_32_bits(adj_period) & 0x3FF; 449 ppm_low = lower_32_bits(adj_period); 450 451 if (cpts->pps_enabled) { 452 estf_ctrl_val = am65_cpts_read32(cpts, genf[pps_index].control); 453 if (neg_adj) 454 estf_ctrl_val &= ~BIT(1); 455 else 456 estf_ctrl_val |= BIT(1); 457 458 /* GenF PPM will do correction using cpts refclk tick which is 459 * (cpts->ts_add_val + 1) ns, so GenF length PPM adj period 460 * need to be corrected. 461 */ 462 pps_adj_period = adj_period * (cpts->ts_add_val + 1); 463 estf_ppm_hi = upper_32_bits(pps_adj_period) & 0x3FF; 464 estf_ppm_low = lower_32_bits(pps_adj_period); 465 } 466 467 spin_lock_irqsave(&cpts->lock, flags); 468 469 /* All below writes must be done extremely fast: 470 * - delay between PPM dir and PPM value changes can cause err due old 471 * PPM correction applied in wrong direction 472 * - delay between CPTS-clock PPM cfg and GenF PPM cfg can cause err 473 * due CPTS-clock PPM working with new cfg while GenF PPM cfg still 474 * with old for short period of time 475 */ 476 477 am65_cpts_write32(cpts, ctrl_val, control); 478 am65_cpts_write32(cpts, ppm_hi, ts_ppm_hi); 479 am65_cpts_write32(cpts, ppm_low, ts_ppm_low); 480 481 if (cpts->pps_enabled) { 482 am65_cpts_write32(cpts, estf_ctrl_val, genf[pps_index].control); 483 am65_cpts_write32(cpts, estf_ppm_hi, genf[pps_index].ppm_hi); 484 am65_cpts_write32(cpts, estf_ppm_low, genf[pps_index].ppm_low); 485 } 486 487 for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) { 488 if (cpts->estf_enable & BIT(i)) { 489 am65_cpts_write32(cpts, estf_ctrl_val, estf[i].control); 490 am65_cpts_write32(cpts, estf_ppm_hi, estf[i].ppm_hi); 491 am65_cpts_write32(cpts, estf_ppm_low, estf[i].ppm_low); 492 } 493 } 494 /* All GenF/EstF can be updated here the same way */ 495 spin_unlock_irqrestore(&cpts->lock, flags); 496 497 mutex_unlock(&cpts->ptp_clk_lock); 498 499 return 0; 500 } 501 502 static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 503 { 504 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info); 505 s64 ns; 506 507 mutex_lock(&cpts->ptp_clk_lock); 508 ns = am65_cpts_gettime(cpts, NULL); 509 ns += delta; 510 am65_cpts_settime(cpts, ns); 511 mutex_unlock(&cpts->ptp_clk_lock); 512 513 return 0; 514 } 515 516 static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp, 517 struct timespec64 *ts, 518 struct ptp_system_timestamp *sts) 519 { 520 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info); 521 u64 ns; 522 523 mutex_lock(&cpts->ptp_clk_lock); 524 ns = am65_cpts_gettime(cpts, sts); 525 mutex_unlock(&cpts->ptp_clk_lock); 526 *ts = ns_to_timespec64(ns); 527 528 return 0; 529 } 530 531 u64 am65_cpts_ns_gettime(struct am65_cpts *cpts) 532 { 533 u64 ns; 534 535 /* reuse ptp_clk_lock as it serialize ts push */ 536 mutex_lock(&cpts->ptp_clk_lock); 537 ns = am65_cpts_gettime(cpts, NULL); 538 mutex_unlock(&cpts->ptp_clk_lock); 539 540 return ns; 541 } 542 EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime); 543 544 static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp, 545 const struct timespec64 *ts) 546 { 547 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info); 548 u64 ns; 549 550 ns = timespec64_to_ns(ts); 551 mutex_lock(&cpts->ptp_clk_lock); 552 am65_cpts_settime(cpts, ns); 553 mutex_unlock(&cpts->ptp_clk_lock); 554 555 return 0; 556 } 557 558 static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on) 559 { 560 u32 v; 561 562 v = am65_cpts_read32(cpts, control); 563 if (on) { 564 v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index); 565 cpts->hw_ts_enable |= BIT(index); 566 } else { 567 v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index); 568 cpts->hw_ts_enable &= ~BIT(index); 569 } 570 am65_cpts_write32(cpts, v, control); 571 } 572 573 static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on) 574 { 575 if (index >= cpts->ptp_info.n_ext_ts) 576 return -ENXIO; 577 578 if (cpts->pps_present && index == cpts->pps_hw_ts_idx) 579 return -EINVAL; 580 581 if (((cpts->hw_ts_enable & BIT(index)) >> index) == on) 582 return 0; 583 584 mutex_lock(&cpts->ptp_clk_lock); 585 am65_cpts_extts_enable_hw(cpts, index, on); 586 mutex_unlock(&cpts->ptp_clk_lock); 587 588 dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n", 589 __func__, index, on ? "enabled" : "disabled"); 590 591 return 0; 592 } 593 594 int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx, 595 struct am65_cpts_estf_cfg *cfg) 596 { 597 u64 cycles; 598 u32 val; 599 600 cycles = cfg->ns_period * cpts->refclk_freq; 601 cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC); 602 if (cycles > U32_MAX) 603 return -EINVAL; 604 605 /* according to TRM should be zeroed */ 606 am65_cpts_write32(cpts, 0, estf[idx].length); 607 608 val = upper_32_bits(cfg->ns_start); 609 am65_cpts_write32(cpts, val, estf[idx].comp_hi); 610 val = lower_32_bits(cfg->ns_start); 611 am65_cpts_write32(cpts, val, estf[idx].comp_lo); 612 val = lower_32_bits(cycles); 613 am65_cpts_write32(cpts, val, estf[idx].length); 614 am65_cpts_write32(cpts, 0, estf[idx].control); 615 am65_cpts_write32(cpts, 0, estf[idx].ppm_hi); 616 am65_cpts_write32(cpts, 0, estf[idx].ppm_low); 617 618 cpts->estf_enable |= BIT(idx); 619 620 dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx); 621 622 return 0; 623 } 624 EXPORT_SYMBOL_GPL(am65_cpts_estf_enable); 625 626 void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx) 627 { 628 am65_cpts_write32(cpts, 0, estf[idx].length); 629 cpts->estf_enable &= ~BIT(idx); 630 631 dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx); 632 } 633 EXPORT_SYMBOL_GPL(am65_cpts_estf_disable); 634 635 static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts, 636 struct ptp_perout_request *req, int on) 637 { 638 u64 ns_period, ns_start, cycles; 639 struct timespec64 ts; 640 u32 val; 641 642 if (on) { 643 ts.tv_sec = req->period.sec; 644 ts.tv_nsec = req->period.nsec; 645 ns_period = timespec64_to_ns(&ts); 646 647 cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC; 648 649 ts.tv_sec = req->start.sec; 650 ts.tv_nsec = req->start.nsec; 651 ns_start = timespec64_to_ns(&ts); 652 653 val = upper_32_bits(ns_start); 654 am65_cpts_write32(cpts, val, genf[req->index].comp_hi); 655 val = lower_32_bits(ns_start); 656 am65_cpts_write32(cpts, val, genf[req->index].comp_lo); 657 val = lower_32_bits(cycles); 658 am65_cpts_write32(cpts, val, genf[req->index].length); 659 660 am65_cpts_write32(cpts, 0, genf[req->index].control); 661 am65_cpts_write32(cpts, 0, genf[req->index].ppm_hi); 662 am65_cpts_write32(cpts, 0, genf[req->index].ppm_low); 663 664 cpts->genf_enable |= BIT(req->index); 665 } else { 666 am65_cpts_write32(cpts, 0, genf[req->index].length); 667 668 cpts->genf_enable &= ~BIT(req->index); 669 } 670 } 671 672 static int am65_cpts_perout_enable(struct am65_cpts *cpts, 673 struct ptp_perout_request *req, int on) 674 { 675 if (req->index >= cpts->ptp_info.n_per_out) 676 return -ENXIO; 677 678 if (cpts->pps_present && req->index == cpts->pps_genf_idx) 679 return -EINVAL; 680 681 if (!!(cpts->genf_enable & BIT(req->index)) == !!on) 682 return 0; 683 684 mutex_lock(&cpts->ptp_clk_lock); 685 am65_cpts_perout_enable_hw(cpts, req, on); 686 mutex_unlock(&cpts->ptp_clk_lock); 687 688 dev_dbg(cpts->dev, "%s: GenF:%u %s\n", 689 __func__, req->index, on ? "enabled" : "disabled"); 690 691 return 0; 692 } 693 694 static int am65_cpts_pps_enable(struct am65_cpts *cpts, int on) 695 { 696 int ret = 0; 697 struct timespec64 ts; 698 struct ptp_clock_request rq; 699 u64 ns; 700 701 if (!cpts->pps_present) 702 return -EINVAL; 703 704 if (cpts->pps_enabled == !!on) 705 return 0; 706 707 mutex_lock(&cpts->ptp_clk_lock); 708 709 if (on) { 710 am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on); 711 712 ns = am65_cpts_gettime(cpts, NULL); 713 ts = ns_to_timespec64(ns); 714 rq.perout.period.sec = 1; 715 rq.perout.period.nsec = 0; 716 rq.perout.start.sec = ts.tv_sec + 2; 717 rq.perout.start.nsec = 0; 718 rq.perout.index = cpts->pps_genf_idx; 719 720 am65_cpts_perout_enable_hw(cpts, &rq.perout, on); 721 cpts->pps_enabled = true; 722 } else { 723 rq.perout.index = cpts->pps_genf_idx; 724 am65_cpts_perout_enable_hw(cpts, &rq.perout, on); 725 am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on); 726 cpts->pps_enabled = false; 727 } 728 729 mutex_unlock(&cpts->ptp_clk_lock); 730 731 dev_dbg(cpts->dev, "%s: pps: %s\n", 732 __func__, on ? "enabled" : "disabled"); 733 return ret; 734 } 735 736 static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp, 737 struct ptp_clock_request *rq, int on) 738 { 739 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info); 740 741 switch (rq->type) { 742 case PTP_CLK_REQ_EXTTS: 743 return am65_cpts_extts_enable(cpts, rq->extts.index, on); 744 case PTP_CLK_REQ_PEROUT: 745 return am65_cpts_perout_enable(cpts, &rq->perout, on); 746 case PTP_CLK_REQ_PPS: 747 return am65_cpts_pps_enable(cpts, on); 748 default: 749 break; 750 } 751 752 return -EOPNOTSUPP; 753 } 754 755 static long am65_cpts_ts_work(struct ptp_clock_info *ptp); 756 757 static struct ptp_clock_info am65_ptp_info = { 758 .owner = THIS_MODULE, 759 .name = "CTPS timer", 760 .adjfine = am65_cpts_ptp_adjfine, 761 .adjtime = am65_cpts_ptp_adjtime, 762 .gettimex64 = am65_cpts_ptp_gettimex, 763 .settime64 = am65_cpts_ptp_settime, 764 .enable = am65_cpts_ptp_enable, 765 .do_aux_work = am65_cpts_ts_work, 766 }; 767 768 static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts, 769 struct am65_cpts_event *event) 770 { 771 struct sk_buff_head txq_list; 772 struct sk_buff *skb, *tmp; 773 unsigned long flags; 774 bool found = false; 775 u32 mtype_seqid; 776 777 mtype_seqid = event->event1 & 778 (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK | 779 AM65_CPTS_EVENT_1_EVENT_TYPE_MASK | 780 AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK); 781 782 __skb_queue_head_init(&txq_list); 783 784 spin_lock_irqsave(&cpts->txq.lock, flags); 785 skb_queue_splice_init(&cpts->txq, &txq_list); 786 spin_unlock_irqrestore(&cpts->txq.lock, flags); 787 788 /* no need to grab txq.lock as access is always done under cpts->lock */ 789 skb_queue_walk_safe(&txq_list, skb, tmp) { 790 struct skb_shared_hwtstamps ssh; 791 struct am65_cpts_skb_cb_data *skb_cb = 792 (struct am65_cpts_skb_cb_data *)skb->cb; 793 794 if ((ptp_classify_raw(skb) & PTP_CLASS_V1) && 795 ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) == 796 (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK))) 797 mtype_seqid = skb_cb->skb_mtype_seqid; 798 799 if (mtype_seqid == skb_cb->skb_mtype_seqid) { 800 u64 ns = event->timestamp; 801 802 memset(&ssh, 0, sizeof(ssh)); 803 ssh.hwtstamp = ns_to_ktime(ns); 804 skb_tstamp_tx(skb, &ssh); 805 found = true; 806 __skb_unlink(skb, &txq_list); 807 dev_consume_skb_any(skb); 808 dev_dbg(cpts->dev, 809 "match tx timestamp mtype_seqid %08x\n", 810 mtype_seqid); 811 break; 812 } 813 814 if (time_after(jiffies, skb_cb->tmo)) { 815 /* timeout any expired skbs over 100 ms */ 816 dev_dbg(cpts->dev, 817 "expiring tx timestamp mtype_seqid %08x\n", 818 mtype_seqid); 819 __skb_unlink(skb, &txq_list); 820 dev_consume_skb_any(skb); 821 } 822 } 823 824 spin_lock_irqsave(&cpts->txq.lock, flags); 825 skb_queue_splice(&txq_list, &cpts->txq); 826 spin_unlock_irqrestore(&cpts->txq.lock, flags); 827 828 return found; 829 } 830 831 static void am65_cpts_find_ts(struct am65_cpts *cpts) 832 { 833 struct am65_cpts_event *event; 834 struct list_head *this, *next; 835 LIST_HEAD(events_free); 836 unsigned long flags; 837 LIST_HEAD(events); 838 839 spin_lock_irqsave(&cpts->lock, flags); 840 list_splice_init(&cpts->events, &events); 841 spin_unlock_irqrestore(&cpts->lock, flags); 842 843 list_for_each_safe(this, next, &events) { 844 event = list_entry(this, struct am65_cpts_event, list); 845 if (am65_cpts_match_tx_ts(cpts, event) || 846 time_after(jiffies, event->tmo)) { 847 list_del_init(&event->list); 848 list_add(&event->list, &events_free); 849 } 850 } 851 852 spin_lock_irqsave(&cpts->lock, flags); 853 list_splice_tail(&events, &cpts->events); 854 list_splice_tail(&events_free, &cpts->pool); 855 spin_unlock_irqrestore(&cpts->lock, flags); 856 } 857 858 static long am65_cpts_ts_work(struct ptp_clock_info *ptp) 859 { 860 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info); 861 unsigned long flags; 862 long delay = -1; 863 864 am65_cpts_find_ts(cpts); 865 866 spin_lock_irqsave(&cpts->txq.lock, flags); 867 if (!skb_queue_empty(&cpts->txq)) 868 delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT; 869 spin_unlock_irqrestore(&cpts->txq.lock, flags); 870 871 return delay; 872 } 873 874 static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid) 875 { 876 unsigned int ptp_class = ptp_classify_raw(skb); 877 struct ptp_header *hdr; 878 u8 msgtype; 879 u16 seqid; 880 881 if (ptp_class == PTP_CLASS_NONE) 882 return 0; 883 884 hdr = ptp_parse_header(skb, ptp_class); 885 if (!hdr) 886 return 0; 887 888 msgtype = ptp_get_msgtype(hdr, ptp_class); 889 seqid = ntohs(hdr->sequence_id); 890 891 *mtype_seqid = (msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) & 892 AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK; 893 *mtype_seqid |= (seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK); 894 895 return 1; 896 } 897 898 static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid) 899 { 900 struct list_head *this, *next; 901 struct am65_cpts_event *event; 902 unsigned long flags; 903 u32 mtype_seqid; 904 u64 ns = 0; 905 906 spin_lock_irqsave(&cpts->lock, flags); 907 __am65_cpts_fifo_read(cpts); 908 list_for_each_safe(this, next, &cpts->events) { 909 event = list_entry(this, struct am65_cpts_event, list); 910 if (time_after(jiffies, event->tmo)) { 911 list_move(&event->list, &cpts->pool); 912 continue; 913 } 914 915 mtype_seqid = event->event1 & 916 (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK | 917 AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK | 918 AM65_CPTS_EVENT_1_EVENT_TYPE_MASK); 919 920 if (mtype_seqid == skb_mtype_seqid) { 921 ns = event->timestamp; 922 list_move(&event->list, &cpts->pool); 923 break; 924 } 925 } 926 spin_unlock_irqrestore(&cpts->lock, flags); 927 928 return ns; 929 } 930 931 void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb) 932 { 933 struct am65_cpts_skb_cb_data *skb_cb = (struct am65_cpts_skb_cb_data *)skb->cb; 934 struct skb_shared_hwtstamps *ssh; 935 int ret; 936 u64 ns; 937 938 /* am65_cpts_rx_timestamp() is called before eth_type_trans(), so 939 * skb MAC Hdr properties are not configured yet. Hence need to 940 * reset skb MAC header here 941 */ 942 skb_reset_mac_header(skb); 943 ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid); 944 if (!ret) 945 return; /* if not PTP class packet */ 946 947 skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_RX << AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT); 948 949 dev_dbg(cpts->dev, "%s mtype seqid %08x\n", __func__, skb_cb->skb_mtype_seqid); 950 951 ns = am65_cpts_find_rx_ts(cpts, skb_cb->skb_mtype_seqid); 952 if (!ns) 953 return; 954 955 ssh = skb_hwtstamps(skb); 956 memset(ssh, 0, sizeof(*ssh)); 957 ssh->hwtstamp = ns_to_ktime(ns); 958 } 959 EXPORT_SYMBOL_GPL(am65_cpts_rx_timestamp); 960 961 /** 962 * am65_cpts_tx_timestamp - save tx packet for timestamping 963 * @cpts: cpts handle 964 * @skb: packet 965 * 966 * This functions saves tx packet for timestamping if packet can be timestamped. 967 * The future processing is done in from PTP auxiliary worker. 968 */ 969 void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb) 970 { 971 struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb; 972 973 if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) 974 return; 975 976 /* add frame to queue for processing later. 977 * The periodic FIFO check will handle this. 978 */ 979 skb_get(skb); 980 /* get the timestamp for timeouts */ 981 skb_cb->tmo = jiffies + msecs_to_jiffies(100); 982 skb_queue_tail(&cpts->txq, skb); 983 ptp_schedule_worker(cpts->ptp_clock, 0); 984 } 985 EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp); 986 987 /** 988 * am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping 989 * @cpts: cpts handle 990 * @skb: packet 991 * 992 * This functions should be called from .xmit(). 993 * It checks if packet can be timestamped, fills internal cpts data 994 * in skb-cb and marks packet as SKBTX_IN_PROGRESS. 995 */ 996 void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb) 997 { 998 struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb; 999 int ret; 1000 1001 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) 1002 return; 1003 1004 ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid); 1005 if (!ret) 1006 return; 1007 skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX << 1008 AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT); 1009 1010 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1011 } 1012 EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp); 1013 1014 int am65_cpts_phc_index(struct am65_cpts *cpts) 1015 { 1016 return cpts->phc_index; 1017 } 1018 EXPORT_SYMBOL_GPL(am65_cpts_phc_index); 1019 1020 static void cpts_free_clk_mux(void *data) 1021 { 1022 struct am65_cpts *cpts = data; 1023 1024 of_clk_del_provider(cpts->clk_mux_np); 1025 clk_hw_unregister_mux(cpts->clk_mux_hw); 1026 of_node_put(cpts->clk_mux_np); 1027 } 1028 1029 static int cpts_of_mux_clk_setup(struct am65_cpts *cpts, 1030 struct device_node *node) 1031 { 1032 unsigned int num_parents; 1033 const char **parent_names; 1034 char *clk_mux_name; 1035 void __iomem *reg; 1036 int ret = -EINVAL; 1037 1038 cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux"); 1039 if (!cpts->clk_mux_np) 1040 return 0; 1041 1042 num_parents = of_clk_get_parent_count(cpts->clk_mux_np); 1043 if (num_parents < 1) { 1044 dev_err(cpts->dev, "mux-clock %pOF must have parents\n", 1045 cpts->clk_mux_np); 1046 goto mux_fail; 1047 } 1048 1049 parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents, 1050 GFP_KERNEL); 1051 if (!parent_names) { 1052 ret = -ENOMEM; 1053 goto mux_fail; 1054 } 1055 1056 of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents); 1057 1058 clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn", 1059 dev_name(cpts->dev), cpts->clk_mux_np); 1060 if (!clk_mux_name) { 1061 ret = -ENOMEM; 1062 goto mux_fail; 1063 } 1064 1065 reg = &cpts->reg->rftclk_sel; 1066 /* dev must be NULL to avoid recursive incrementing 1067 * of module refcnt 1068 */ 1069 cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name, 1070 parent_names, num_parents, 1071 0, reg, 0, 5, 0, NULL); 1072 if (IS_ERR(cpts->clk_mux_hw)) { 1073 ret = PTR_ERR(cpts->clk_mux_hw); 1074 goto mux_fail; 1075 } 1076 1077 ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get, 1078 cpts->clk_mux_hw); 1079 if (ret) 1080 goto clk_hw_register; 1081 1082 ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts); 1083 if (ret) 1084 dev_err(cpts->dev, "failed to add clkmux reset action %d", ret); 1085 1086 return ret; 1087 1088 clk_hw_register: 1089 clk_hw_unregister_mux(cpts->clk_mux_hw); 1090 mux_fail: 1091 of_node_put(cpts->clk_mux_np); 1092 return ret; 1093 } 1094 1095 static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node) 1096 { 1097 u32 prop[2]; 1098 1099 if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0])) 1100 cpts->ext_ts_inputs = prop[0]; 1101 1102 if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0])) 1103 cpts->genf_num = prop[0]; 1104 1105 if (!of_property_read_u32_array(node, "ti,pps", prop, 2)) { 1106 cpts->pps_present = true; 1107 1108 if (prop[0] > 7) { 1109 dev_err(cpts->dev, "invalid HWx_TS_PUSH index: %u provided\n", prop[0]); 1110 cpts->pps_present = false; 1111 } 1112 if (prop[1] > 1) { 1113 dev_err(cpts->dev, "invalid GENFy index: %u provided\n", prop[1]); 1114 cpts->pps_present = false; 1115 } 1116 if (cpts->pps_present) { 1117 cpts->pps_hw_ts_idx = prop[0]; 1118 cpts->pps_genf_idx = prop[1]; 1119 } 1120 } 1121 1122 return cpts_of_mux_clk_setup(cpts, node); 1123 } 1124 1125 void am65_cpts_release(struct am65_cpts *cpts) 1126 { 1127 ptp_clock_unregister(cpts->ptp_clock); 1128 am65_cpts_disable(cpts); 1129 clk_disable_unprepare(cpts->refclk); 1130 } 1131 EXPORT_SYMBOL_GPL(am65_cpts_release); 1132 1133 struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs, 1134 struct device_node *node) 1135 { 1136 struct am65_cpts *cpts; 1137 int ret, i; 1138 1139 cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL); 1140 if (!cpts) 1141 return ERR_PTR(-ENOMEM); 1142 1143 cpts->dev = dev; 1144 cpts->reg = (struct am65_cpts_regs __iomem *)regs; 1145 1146 cpts->irq = of_irq_get_byname(node, "cpts"); 1147 if (cpts->irq <= 0) { 1148 ret = cpts->irq ?: -ENXIO; 1149 dev_err_probe(dev, ret, "Failed to get IRQ number\n"); 1150 return ERR_PTR(ret); 1151 } 1152 1153 ret = am65_cpts_of_parse(cpts, node); 1154 if (ret) 1155 return ERR_PTR(ret); 1156 1157 mutex_init(&cpts->ptp_clk_lock); 1158 INIT_LIST_HEAD(&cpts->events); 1159 INIT_LIST_HEAD(&cpts->pool); 1160 spin_lock_init(&cpts->lock); 1161 skb_queue_head_init(&cpts->txq); 1162 1163 for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++) 1164 list_add(&cpts->pool_data[i].list, &cpts->pool); 1165 1166 cpts->refclk = devm_get_clk_from_child(dev, node, "cpts"); 1167 if (IS_ERR(cpts->refclk)) { 1168 ret = PTR_ERR(cpts->refclk); 1169 dev_err_probe(dev, ret, "Failed to get refclk\n"); 1170 return ERR_PTR(ret); 1171 } 1172 1173 ret = clk_prepare_enable(cpts->refclk); 1174 if (ret) { 1175 dev_err(dev, "Failed to enable refclk %d\n", ret); 1176 return ERR_PTR(ret); 1177 } 1178 1179 cpts->refclk_freq = clk_get_rate(cpts->refclk); 1180 1181 am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM; 1182 cpts->ptp_info = am65_ptp_info; 1183 1184 if (cpts->ext_ts_inputs) 1185 cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs; 1186 if (cpts->genf_num) 1187 cpts->ptp_info.n_per_out = cpts->genf_num; 1188 if (cpts->pps_present) 1189 cpts->ptp_info.pps = 1; 1190 1191 am65_cpts_set_add_val(cpts); 1192 1193 am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN | 1194 AM65_CPTS_CONTROL_64MODE | 1195 AM65_CPTS_CONTROL_TX_GENF_CLR_EN, 1196 control); 1197 am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable); 1198 1199 /* set time to the current system time */ 1200 am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real())); 1201 1202 cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev); 1203 if (IS_ERR_OR_NULL(cpts->ptp_clock)) { 1204 dev_err(dev, "Failed to register ptp clk %ld\n", 1205 PTR_ERR(cpts->ptp_clock)); 1206 ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV; 1207 goto refclk_disable; 1208 } 1209 cpts->phc_index = ptp_clock_index(cpts->ptp_clock); 1210 1211 ret = devm_request_threaded_irq(dev, cpts->irq, NULL, 1212 am65_cpts_interrupt, 1213 IRQF_ONESHOT, dev_name(dev), cpts); 1214 if (ret < 0) { 1215 dev_err(cpts->dev, "error attaching irq %d\n", ret); 1216 goto reset_ptpclk; 1217 } 1218 1219 dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u pps:%d\n", 1220 am65_cpts_read32(cpts, idver), 1221 cpts->refclk_freq, cpts->ts_add_val, cpts->pps_present); 1222 1223 return cpts; 1224 1225 reset_ptpclk: 1226 am65_cpts_release(cpts); 1227 refclk_disable: 1228 clk_disable_unprepare(cpts->refclk); 1229 return ERR_PTR(ret); 1230 } 1231 EXPORT_SYMBOL_GPL(am65_cpts_create); 1232 1233 void am65_cpts_suspend(struct am65_cpts *cpts) 1234 { 1235 /* save state and disable CPTS */ 1236 cpts->sr_control = am65_cpts_read32(cpts, control); 1237 cpts->sr_int_enable = am65_cpts_read32(cpts, int_enable); 1238 cpts->sr_rftclk_sel = am65_cpts_read32(cpts, rftclk_sel); 1239 cpts->sr_ts_ppm_hi = am65_cpts_read32(cpts, ts_ppm_hi); 1240 cpts->sr_ts_ppm_low = am65_cpts_read32(cpts, ts_ppm_low); 1241 cpts->sr_cpts_ns = am65_cpts_gettime(cpts, NULL); 1242 cpts->sr_ktime_ns = ktime_to_ns(ktime_get_real()); 1243 am65_cpts_disable(cpts); 1244 clk_disable(cpts->refclk); 1245 1246 /* Save GENF state */ 1247 memcpy_fromio(&cpts->sr_genf, &cpts->reg->genf, sizeof(cpts->sr_genf)); 1248 1249 /* Save ESTF state */ 1250 memcpy_fromio(&cpts->sr_estf, &cpts->reg->estf, sizeof(cpts->sr_estf)); 1251 } 1252 EXPORT_SYMBOL_GPL(am65_cpts_suspend); 1253 1254 void am65_cpts_resume(struct am65_cpts *cpts) 1255 { 1256 int i; 1257 s64 ktime_ns; 1258 1259 /* restore state and enable CPTS */ 1260 clk_enable(cpts->refclk); 1261 am65_cpts_write32(cpts, cpts->sr_rftclk_sel, rftclk_sel); 1262 am65_cpts_set_add_val(cpts); 1263 am65_cpts_write32(cpts, cpts->sr_control, control); 1264 am65_cpts_write32(cpts, cpts->sr_int_enable, int_enable); 1265 1266 /* Restore time to saved CPTS time + time in suspend/resume */ 1267 ktime_ns = ktime_to_ns(ktime_get_real()); 1268 ktime_ns -= cpts->sr_ktime_ns; 1269 am65_cpts_settime(cpts, cpts->sr_cpts_ns + ktime_ns); 1270 1271 /* Restore compensation (PPM) */ 1272 am65_cpts_write32(cpts, cpts->sr_ts_ppm_hi, ts_ppm_hi); 1273 am65_cpts_write32(cpts, cpts->sr_ts_ppm_low, ts_ppm_low); 1274 1275 /* Restore GENF state */ 1276 for (i = 0; i < AM65_CPTS_GENF_MAX_NUM; i++) { 1277 am65_cpts_write32(cpts, 0, genf[i].length); /* TRM sequence */ 1278 am65_cpts_write32(cpts, cpts->sr_genf[i].comp_hi, genf[i].comp_hi); 1279 am65_cpts_write32(cpts, cpts->sr_genf[i].comp_lo, genf[i].comp_lo); 1280 am65_cpts_write32(cpts, cpts->sr_genf[i].length, genf[i].length); 1281 am65_cpts_write32(cpts, cpts->sr_genf[i].control, genf[i].control); 1282 am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_hi, genf[i].ppm_hi); 1283 am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_low, genf[i].ppm_low); 1284 } 1285 1286 /* Restore ESTTF state */ 1287 for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) { 1288 am65_cpts_write32(cpts, 0, estf[i].length); /* TRM sequence */ 1289 am65_cpts_write32(cpts, cpts->sr_estf[i].comp_hi, estf[i].comp_hi); 1290 am65_cpts_write32(cpts, cpts->sr_estf[i].comp_lo, estf[i].comp_lo); 1291 am65_cpts_write32(cpts, cpts->sr_estf[i].length, estf[i].length); 1292 am65_cpts_write32(cpts, cpts->sr_estf[i].control, estf[i].control); 1293 am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_hi, estf[i].ppm_hi); 1294 am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_low, estf[i].ppm_low); 1295 } 1296 } 1297 EXPORT_SYMBOL_GPL(am65_cpts_resume); 1298 1299 static int am65_cpts_probe(struct platform_device *pdev) 1300 { 1301 struct device_node *node = pdev->dev.of_node; 1302 struct device *dev = &pdev->dev; 1303 struct am65_cpts *cpts; 1304 void __iomem *base; 1305 1306 base = devm_platform_ioremap_resource_byname(pdev, "cpts"); 1307 if (IS_ERR(base)) 1308 return PTR_ERR(base); 1309 1310 cpts = am65_cpts_create(dev, base, node); 1311 return PTR_ERR_OR_ZERO(cpts); 1312 } 1313 1314 static const struct of_device_id am65_cpts_of_match[] = { 1315 { .compatible = "ti,am65-cpts", }, 1316 { .compatible = "ti,j721e-cpts", }, 1317 {}, 1318 }; 1319 MODULE_DEVICE_TABLE(of, am65_cpts_of_match); 1320 1321 static struct platform_driver am65_cpts_driver = { 1322 .probe = am65_cpts_probe, 1323 .driver = { 1324 .name = "am65-cpts", 1325 .of_match_table = am65_cpts_of_match, 1326 }, 1327 }; 1328 module_platform_driver(am65_cpts_driver); 1329 1330 MODULE_LICENSE("GPL v2"); 1331 MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>"); 1332 MODULE_DESCRIPTION("TI K3 AM65 CPTS driver"); 1333