// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
 *
 * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/timekeeping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>

#include "icss_iep.h"

#define IEP_MAX_DEF_INC				0xf
#define IEP_MAX_COMPEN_INC			0xfff
#define IEP_MAX_COMPEN_COUNT			0xffffff

#define IEP_GLOBAL_CFG_CNT_ENABLE		BIT(0)
#define IEP_GLOBAL_CFG_DEFAULT_INC_MASK		GENMASK(7, 4)
#define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT	4
#define IEP_GLOBAL_CFG_COMPEN_INC_MASK		GENMASK(19, 8)
#define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT		8

#define IEP_GLOBAL_STATUS_CNT_OVF		BIT(0)

#define IEP_CMP_CFG_SHADOW_EN			BIT(17)
#define IEP_CMP_CFG_CMP0_RST_CNT_EN		BIT(0)
#define IEP_CMP_CFG_CMP_EN(cmp)			(GENMASK(16, 1) & (1 << ((cmp) + 1)))

#define IEP_CMP_STATUS(cmp)			(1 << (cmp))

#define IEP_SYNC_CTRL_SYNC_EN			BIT(0)
#define IEP_SYNC_CTRL_SYNC_N_EN(n)		(GENMASK(2, 1) & (BIT(1) << (n)))

#define IEP_MIN_CMP				0
#define IEP_MAX_CMP				15

#define ICSS_IEP_64BIT_COUNTER_SUPPORT		BIT(0)
#define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT	BIT(1)
#define ICSS_IEP_SHADOW_MODE_SUPPORT		BIT(2)

#define LATCH_INDEX(ts_index)			((ts_index) + 6)
#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n)	BIT(LATCH_INDEX(n))
#define IEP_CAP_CFG_CAP_ASYNC_EN(n)		BIT(LATCH_INDEX(n) + 10)
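/*
 * Worked example (illustrative): the CMP enable bits in ICSS_IEP_CMP_CFG_REG
 * are offset by one because bit 0 is CMP0_RST_CNT_EN, so
 * IEP_CMP_CFG_CMP_EN(1) expands to GENMASK(16, 1) & (1 << 2) == BIT(2).
 * The status bits in ICSS_IEP_CMP_STAT_REG are not offset:
 * IEP_CMP_STATUS(1) == BIT(1), which is the bit the cap_cmp IRQ handler
 * below tests and clears for CMP1.
 */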
enum {
        ICSS_IEP_GLOBAL_CFG_REG,
        ICSS_IEP_GLOBAL_STATUS_REG,
        ICSS_IEP_COMPEN_REG,
        ICSS_IEP_SLOW_COMPEN_REG,
        ICSS_IEP_COUNT_REG0,
        ICSS_IEP_COUNT_REG1,
        ICSS_IEP_CAPTURE_CFG_REG,
        ICSS_IEP_CAPTURE_STAT_REG,

        ICSS_IEP_CAP6_RISE_REG0,
        ICSS_IEP_CAP6_RISE_REG1,

        ICSS_IEP_CAP7_RISE_REG0,
        ICSS_IEP_CAP7_RISE_REG1,

        ICSS_IEP_CMP_CFG_REG,
        ICSS_IEP_CMP_STAT_REG,
        ICSS_IEP_CMP0_REG0,
        ICSS_IEP_CMP0_REG1,
        ICSS_IEP_CMP1_REG0,
        ICSS_IEP_CMP1_REG1,

        ICSS_IEP_CMP8_REG0,
        ICSS_IEP_CMP8_REG1,
        ICSS_IEP_SYNC_CTRL_REG,
        ICSS_IEP_SYNC0_STAT_REG,
        ICSS_IEP_SYNC1_STAT_REG,
        ICSS_IEP_SYNC_PWIDTH_REG,
        ICSS_IEP_SYNC0_PERIOD_REG,
        ICSS_IEP_SYNC1_DELAY_REG,
        ICSS_IEP_SYNC_START_REG,
        ICSS_IEP_MAX_REGS,
};

/**
 * struct icss_iep_plat_data - Platform data to handle SoC variants
 * @config: Regmap configuration data
 * @reg_offs: Register offsets to capture offset differences across SoCs
 * @flags: Flags to represent IEP properties
 */
struct icss_iep_plat_data {
        const struct regmap_config *config;
        u32 reg_offs[ICSS_IEP_MAX_REGS];
        u32 flags;
};

struct icss_iep {
        struct device *dev;
        void __iomem *base;
        const struct icss_iep_plat_data *plat_data;
        struct regmap *map;
        struct device_node *client_np;
        unsigned long refclk_freq;
        int clk_tick_time;      /* one refclk tick time in ns */
        struct ptp_clock_info ptp_info;
        struct ptp_clock *ptp_clock;
        struct mutex ptp_clk_mutex;     /* PHC access serializer */
        u32 def_inc;
        s16 slow_cmp_inc;
        u32 slow_cmp_count;
        const struct icss_iep_clockops *ops;
        void *clockops_data;
        u32 cycle_time_ns;
        u32 perout_enabled;
        bool pps_enabled;
        int cap_cmp_irq;
        u64 period;
        u32 latch_enable;
        struct work_struct work;
};

/**
 * icss_iep_get_count_hi() - Get the upper 32 bits of the IEP counter
 * @iep: Pointer to structure representing IEP.
 *
 * Return: upper 32 bits of the IEP counter
 */
int icss_iep_get_count_hi(struct icss_iep *iep)
{
        u32 val = 0;

        if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
                val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);

        return val;
}
EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);

/**
 * icss_iep_get_count_low() - Get the lower 32 bits of the IEP counter
 * @iep: Pointer to structure representing IEP.
 *
 * Return: lower 32 bits of the IEP counter
 */
int icss_iep_get_count_low(struct icss_iep *iep)
{
        u32 val = 0;

        if (iep)
                val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);

        return val;
}
EXPORT_SYMBOL_GPL(icss_iep_get_count_low);

/**
 * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
 * @iep: Pointer to structure representing IEP.
 *
 * Return: PTP clock index, -1 if not registered
 */
int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
{
        if (!iep || !iep->ptp_clock)
                return -1;
        return ptp_clock_index(iep->ptp_clock);
}
EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);

static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
{
        if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
                writel(upper_32_bits(ns), iep->base +
                       iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
        writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
}

static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);

/**
 * icss_iep_settime() - Set time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @ns: Time to be set in nanoseconds
 *
 * This API uses writel() instead of regmap_write() for write operations as
 * regmap_write() is too slow and this API is time sensitive.
 */
static void icss_iep_settime(struct icss_iep *iep, u64 ns)
{
        if (iep->ops && iep->ops->settime) {
                iep->ops->settime(iep->clockops_data, ns);
                return;
        }

        if (iep->pps_enabled || iep->perout_enabled)
                writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);

        icss_iep_set_counter(iep, ns);

        if (iep->pps_enabled || iep->perout_enabled) {
                icss_iep_update_to_next_boundary(iep, ns);
                writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
                       iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
        }
}

/**
 * icss_iep_gettime() - Get time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @sts: Pointer to structure representing PTP system timestamp.
 *
 * This API uses readl() instead of regmap_read() for read operations as
 * regmap_read() is too slow and this API is time sensitive.
 *
 * Return: The current timestamp of the PTP clock using IEP driver
 */
static u64 icss_iep_gettime(struct icss_iep *iep,
                            struct ptp_system_timestamp *sts)
{
        u32 ts_hi = 0, ts_lo;
        unsigned long flags;

        if (iep->ops && iep->ops->gettime)
                return iep->ops->gettime(iep->clockops_data, sts);

        /* use local_irq_x() to make it work for both RT/non-RT */
        local_irq_save(flags);

        /* no need to play with hi-lo, hi is latched when lo is read */
        ptp_read_system_prets(sts);
        ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
        ptp_read_system_postts(sts);
        if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
                ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);

        local_irq_restore(flags);

        return (u64)ts_lo | (u64)ts_hi << 32;
}

static void icss_iep_enable(struct icss_iep *iep)
{
        regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
                           IEP_GLOBAL_CFG_CNT_ENABLE,
                           IEP_GLOBAL_CFG_CNT_ENABLE);
}

static void icss_iep_disable(struct icss_iep *iep)
{
        regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
                           IEP_GLOBAL_CFG_CNT_ENABLE,
                           0);
}
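/*
 * In shadow mode the counter is wrapped by hardware on a CMP0 match instead
 * of free running. CMP0 is programmed one default increment short of the
 * nominal cycle time, likely because the reset on the CMP0 event itself
 * accounts for the remaining tick.
 */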
static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
{
        u32 cycle_time;
        int cmp;

        cycle_time = iep->cycle_time_ns - iep->def_inc;

        icss_iep_disable(iep);

        /* disable shadow mode */
        regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
                           IEP_CMP_CFG_SHADOW_EN, 0);

        /* enable shadow mode */
        regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
                           IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);

        /* clear counters */
        icss_iep_set_counter(iep, 0);

        /* clear overflow status */
        regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
                           IEP_GLOBAL_STATUS_CNT_OVF,
                           IEP_GLOBAL_STATUS_CNT_OVF);

        /* clear compare status */
        for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
                regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
                                   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
        }

        /* enable reset counter on CMP0 event */
        regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
                           IEP_CMP_CFG_CMP0_RST_CNT_EN,
                           IEP_CMP_CFG_CMP0_RST_CNT_EN);
        /* enable compare */
        regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
                           IEP_CMP_CFG_CMP_EN(0),
                           IEP_CMP_CFG_CMP_EN(0));

        /* set CMP0 value to cycle time */
        regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
        if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
                regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);

        icss_iep_set_counter(iep, 0);
        icss_iep_enable(iep);
}
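/*
 * The default increment field in ICSS_IEP_GLOBAL_CFG_REG is only 4 bits wide
 * (GENMASK(7, 4)), hence IEP_MAX_DEF_INC of 0xf. For example, a 250 MHz
 * refclk yields def_inc = 4 ns, well within range.
 */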
static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
{
        regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
                           IEP_GLOBAL_CFG_DEFAULT_INC_MASK,
                           def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT);
}

static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
{
        struct device *dev = regmap_get_device(iep->map);

        if (compen_inc > IEP_MAX_COMPEN_INC) {
                dev_err(dev, "%s: too high compensation inc %d\n",
                        __func__, compen_inc);
                compen_inc = IEP_MAX_COMPEN_INC;
        }

        regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
                           IEP_GLOBAL_CFG_COMPEN_INC_MASK,
                           compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT);
}

static void icss_iep_set_compensation_count(struct icss_iep *iep,
                                            u32 compen_count)
{
        struct device *dev = regmap_get_device(iep->map);

        if (compen_count > IEP_MAX_COMPEN_COUNT) {
                dev_err(dev, "%s: too high compensation count %d\n",
                        __func__, compen_count);
                compen_count = IEP_MAX_COMPEN_COUNT;
        }

        regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
}

static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
                                                 u32 compen_count)
{
        regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
}

/* PTP PHC operations */
static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
        struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
        s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
        u32 cyc_count;
        u16 cmp_inc;

        mutex_lock(&iep->ptp_clk_mutex);

        /* ppb is the amount of frequency adjustment we want relative to
         * 1 GHz (a billion), e.g. 100 ppb means we need to speed up the
         * clock by 100 Hz, i.e. at the end of 1 second (1 billion ns) of
         * clock time we should have counted 100 more ns.
         * We use IEP slow compensation to achieve continuous freq. adjustment.
         * There are 2 parts: cycle time and adjustment per cycle.
         * The simplest case would be a 1 sec cycle time; the adjustment
         * per cycle would then be (def_inc + ppb).
         * The cycle time has to be chosen based on the magnitude of ppb:
         * the smaller the ppb, the larger the cycle time.
         * The minimum adjustment we can do is +-1 ns per cycle, so reduce
         * the cycle time to get a 1 ns per cycle adjustment:
         * 1 ppb = 1 sec cycle time & 1 ns adjust per cycle
         * 1000 ppb = 1/1000 sec cycle time & 1 ns adjust per cycle
         */
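        /* Worked example (illustrative, assuming a 250 MHz refclk, i.e.
         * clk_tick_time = 4 ns, a free-running counter and ppb = 100):
         *   slow_cmp_inc = 1 ns
         *   cyc_count    = 1e9 / 100 / 4 = 2,500,000 refclk cycles
         * Every 2,500,000 cycles (10 ms of wall time) the counter advances
         * by def_inc + 1 = 5 ns for one tick, i.e. +100 ns per second,
         * which is exactly +100 ppb.
         */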

        if (iep->cycle_time_ns)
                iep->slow_cmp_inc = iep->clk_tick_time;	/* 4ns adj per cycle */
        else
                iep->slow_cmp_inc = 1;	/* 1ns adjust per cycle */

        if (ppb < 0) {
                iep->slow_cmp_inc = -iep->slow_cmp_inc;
                ppb = -ppb;
        }

        cyc_count = NSEC_PER_SEC;		/* 1s cycle time @1GHz */
        cyc_count /= ppb;			/* cycle time per ppb */

        /* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
        if (!iep->cycle_time_ns)
                cyc_count /= iep->clk_tick_time;
        iep->slow_cmp_count = cyc_count;

        /* iep->clk_tick_time is def_inc */
        cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
        icss_iep_set_compensation_inc(iep, cmp_inc);
        icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);

        mutex_unlock(&iep->ptp_clk_mutex);

        return 0;
}

static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
        s64 ns;

        mutex_lock(&iep->ptp_clk_mutex);
        if (iep->ops && iep->ops->adjtime) {
                iep->ops->adjtime(iep->clockops_data, delta);
        } else {
                ns = icss_iep_gettime(iep, NULL);
                ns += delta;
                icss_iep_settime(iep, ns);
        }
        mutex_unlock(&iep->ptp_clk_mutex);

        return 0;
}

static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
                                  struct timespec64 *ts,
                                  struct ptp_system_timestamp *sts)
{
        struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
        u64 ns;

        mutex_lock(&iep->ptp_clk_mutex);
        ns = icss_iep_gettime(iep, sts);
        *ts = ns_to_timespec64(ns);
        mutex_unlock(&iep->ptp_clk_mutex);

        return 0;
}

static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
                                const struct timespec64 *ts)
{
        struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
        u64 ns;

        mutex_lock(&iep->ptp_clk_mutex);
        ns = timespec64_to_ns(ts);
        icss_iep_settime(iep, ns);
        mutex_unlock(&iep->ptp_clk_mutex);

        return 0;
}
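/*
 * Example of the rounding below (illustrative): with period p_ns = 1e9 and
 * start_ns = 2.5e9, start_ns + p_ns - 1 = 3,499,999,999; do_div() leaves the
 * quotient 3 in start_ns (offset = 499,999,999), so the CMP1 target becomes
 * 3 * p_ns = 3e9, the next whole period boundary.
 */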
static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
{
        u64 ns, p_ns;
        u32 offset;

        ns = icss_iep_gettime(iep, NULL);
        if (start_ns < ns)
                start_ns = ns;
        p_ns = iep->period;
        /* Round up to next period boundary */
        start_ns += p_ns - 1;
        offset = do_div(start_ns, p_ns);
        start_ns = start_ns * p_ns;
        /* If it is too close to update, shift to next boundary */
        if (p_ns - offset < 10)
                start_ns += p_ns;

        regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
        if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
                regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
}

static int icss_iep_perout_enable_hw(struct icss_iep *iep,
                                     struct ptp_perout_request *req, int on)
{
        int ret;
        u64 cmp;

        if (iep->ops && iep->ops->perout_enable) {
                ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
                if (ret)
                        return ret;

                if (on) {
                        /* Configure CMP */
                        regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
                        if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
                                regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
                        /* Configure SYNC, 1ms pulse width */
                        regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
                        regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
                        regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
                        regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
                        /* Enable CMP 1 */
                        regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
                                           IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
                } else {
                        /* Disable CMP 1 */
                        regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
                                           IEP_CMP_CFG_CMP_EN(1), 0);

                        /* clear regs */
                        regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
                        if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
                                regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
                }
        } else {
                if (on) {
                        u64 start_ns;

                        iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
                                      req->period.nsec;
                        start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
                                   + req->period.nsec;
                        icss_iep_update_to_next_boundary(iep, start_ns);

                        /* Enable Sync in single shot mode */
                        regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
                                     IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
                        /* Enable CMP 1 */
                        regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
                                           IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
                } else {
                        /* Disable CMP 1 */
                        regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
                                           IEP_CMP_CFG_CMP_EN(1), 0);

                        /* clear CMP regs */
                        regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
                        if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
                                regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);

                        /* Disable sync */
                        regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
                }
        }

        return 0;
}

static int icss_iep_perout_enable(struct icss_iep *iep,
                                  struct ptp_perout_request *req, int on)
{
        int ret = 0;

        mutex_lock(&iep->ptp_clk_mutex);

        if (iep->pps_enabled) {
                ret = -EBUSY;
                goto exit;
        }

        if (iep->perout_enabled == !!on)
                goto exit;

        ret = icss_iep_perout_enable_hw(iep, req, on);
        if (!ret)
                iep->perout_enabled = !!on;

exit:
        mutex_unlock(&iep->ptp_clk_mutex);

        return ret;
}
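/*
 * PPS flow (summary of the code below): the cap_cmp interrupt fires on a
 * CMP1 match, the handler clears the event and schedules this work item,
 * which reads back the CMP1 target, arms CMP1 for one period later and
 * reports a PTP_CLOCK_PPSUSR event to the PTP core.
 */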
static void icss_iep_cap_cmp_work(struct work_struct *work)
{
        struct icss_iep *iep = container_of(work, struct icss_iep, work);
        const u32 *reg_offs = iep->plat_data->reg_offs;
        struct ptp_clock_event pevent;
        unsigned int val;
        u64 ns, ns_next;

        mutex_lock(&iep->ptp_clk_mutex);

        ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
        if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) {
                val = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
                ns |= (u64)val << 32;
        }
        /* set next event */
        ns_next = ns + iep->period;
        writel(lower_32_bits(ns_next),
               iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
        if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
                writel(upper_32_bits(ns_next),
                       iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);

        pevent.pps_times.ts_real = ns_to_timespec64(ns);
        pevent.type = PTP_CLOCK_PPSUSR;
        pevent.index = 0;
        ptp_clock_event(iep->ptp_clock, &pevent);
        dev_dbg(iep->dev, "IEP:pps ts: %llu next:%llu:\n", ns, ns_next);

        mutex_unlock(&iep->ptp_clk_mutex);
}

static irqreturn_t icss_iep_cap_cmp_irq(int irq, void *dev_id)
{
        struct icss_iep *iep = (struct icss_iep *)dev_id;
        const u32 *reg_offs = iep->plat_data->reg_offs;
        unsigned int val;

        val = readl(iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
        /* The driver only enables CMP1 */
        if (val & BIT(1)) {
                /* Clear the event */
                writel(BIT(1), iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
                if (iep->pps_enabled || iep->perout_enabled)
                        schedule_work(&iep->work);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static int icss_iep_pps_enable(struct icss_iep *iep, int on)
{
        struct ptp_clock_request rq;
        struct timespec64 ts;
        int ret = 0;
        u64 ns;

        mutex_lock(&iep->ptp_clk_mutex);

        if (iep->perout_enabled) {
                ret = -EBUSY;
                goto exit;
        }

        if (iep->pps_enabled == !!on)
                goto exit;

        rq.perout.index = 0;
        if (on) {
                ns = icss_iep_gettime(iep, NULL);
                ts = ns_to_timespec64(ns);
                rq.perout.period.sec = 1;
                rq.perout.period.nsec = 0;
                rq.perout.start.sec = ts.tv_sec + 2;
                rq.perout.start.nsec = 0;
                ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
        } else {
                ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
                if (iep->cap_cmp_irq)
                        cancel_work_sync(&iep->work);
        }

        if (!ret)
                iep->pps_enabled = !!on;

exit:
        mutex_unlock(&iep->ptp_clk_mutex);

        return ret;
}

static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
        u32 val, cap, ret = 0;

        mutex_lock(&iep->ptp_clk_mutex);

        if (iep->ops && iep->ops->extts_enable) {
                ret = iep->ops->extts_enable(iep->clockops_data, index, on);
                goto exit;
        }

        if (((iep->latch_enable & BIT(index)) >> index) == on)
                goto exit;

        regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
        cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
        if (on) {
                val |= cap;
                iep->latch_enable |= BIT(index);
        } else {
                val &= ~cap;
                iep->latch_enable &= ~BIT(index);
        }
        regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);

exit:
        mutex_unlock(&iep->ptp_clk_mutex);

        return ret;
}

static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
                               struct ptp_clock_request *rq, int on)
{
        struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);

        switch (rq->type) {
        case PTP_CLK_REQ_PEROUT:
                return icss_iep_perout_enable(iep, &rq->perout, on);
        case PTP_CLK_REQ_PPS:
                return icss_iep_pps_enable(iep, on);
        case PTP_CLK_REQ_EXTTS:
                return icss_iep_extts_enable(iep, rq->extts.index, on);
        default:
                break;
        }

        return -EOPNOTSUPP;
}

static struct ptp_clock_info icss_iep_ptp_info = {
        .owner		= THIS_MODULE,
        .name		= "ICSS IEP timer",
        .max_adj	= 10000000,
        .adjfine	= icss_iep_ptp_adjfine,
        .adjtime	= icss_iep_ptp_adjtime,
        .gettimex64	= icss_iep_ptp_gettimeex,
        .settime64	= icss_iep_ptp_settime,
        .enable		= icss_iep_ptp_enable,
};

struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
{
        struct platform_device *pdev;
        struct device_node *iep_np;
        struct icss_iep *iep;

        iep_np = of_parse_phandle(np, "ti,iep", idx);
        if (!iep_np || !of_device_is_available(iep_np))
                return ERR_PTR(-ENODEV);

        pdev = of_find_device_by_node(iep_np);
        of_node_put(iep_np);

        if (!pdev)
                /* probably IEP not yet probed */
                return ERR_PTR(-EPROBE_DEFER);

        iep = platform_get_drvdata(pdev);
        if (!iep)
                return ERR_PTR(-EPROBE_DEFER);

        device_lock(iep->dev);
        if (iep->client_np) {
                device_unlock(iep->dev);
                dev_err(iep->dev, "IEP is already acquired by %s\n",
                        iep->client_np->name);
                return ERR_PTR(-EBUSY);
        }
        iep->client_np = np;
        device_unlock(iep->dev);
        get_device(iep->dev);

        return iep;
}
EXPORT_SYMBOL_GPL(icss_iep_get_idx);

struct icss_iep *icss_iep_get(struct device_node *np)
{
        return icss_iep_get_idx(np, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_get);

void icss_iep_put(struct icss_iep *iep)
{
        device_lock(iep->dev);
        iep->client_np = NULL;
        device_unlock(iep->dev);
        put_device(iep->dev);
}
EXPORT_SYMBOL_GPL(icss_iep_put);
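/*
 * Typical client usage (sketch; assumes the client's DT node carries a
 * "ti,iep" phandle, error handling omitted):
 *
 *	iep = icss_iep_get(client_np);
 *	icss_iep_init(iep, NULL, NULL, 0);	(free-running 64-bit counter)
 *	phc_idx = icss_iep_get_ptp_clock_idx(iep);
 *	...
 *	icss_iep_exit(iep);
 *	icss_iep_put(iep);
 */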
void icss_iep_init_fw(struct icss_iep *iep)
{
        /* start IEP for FW use in raw 64bit mode, no PTP support */
        iep->clk_tick_time = iep->def_inc;
        iep->cycle_time_ns = 0;
        iep->ops = NULL;
        iep->clockops_data = NULL;
        icss_iep_set_default_inc(iep, iep->def_inc);
        icss_iep_set_compensation_inc(iep, iep->def_inc);
        icss_iep_set_compensation_count(iep, 0);
        regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
        regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
        if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
                icss_iep_set_slow_compensation_count(iep, 0);

        icss_iep_enable(iep);
        icss_iep_settime(iep, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_init_fw);

void icss_iep_exit_fw(struct icss_iep *iep)
{
        icss_iep_disable(iep);
}
EXPORT_SYMBOL_GPL(icss_iep_exit_fw);

int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
                  void *clockops_data, u32 cycle_time_ns)
{
        int ret = 0;

        iep->cycle_time_ns = cycle_time_ns;
        iep->clk_tick_time = iep->def_inc;
        iep->ops = clkops;
        iep->clockops_data = clockops_data;
        icss_iep_set_default_inc(iep, iep->def_inc);
        icss_iep_set_compensation_inc(iep, iep->def_inc);
        icss_iep_set_compensation_count(iep, 0);
        regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
        regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
        if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
                icss_iep_set_slow_compensation_count(iep, 0);

        if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
            !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
                goto skip_perout;

        if (iep->ops && iep->ops->perout_enable) {
                iep->ptp_info.n_per_out = 1;
                iep->ptp_info.pps = 1;
        } else if (iep->cap_cmp_irq) {
                iep->ptp_info.pps = 1;
        }

        if (iep->ops && iep->ops->extts_enable)
                iep->ptp_info.n_ext_ts = 2;

skip_perout:
        if (cycle_time_ns)
                icss_iep_enable_shadow_mode(iep);
        else
                icss_iep_enable(iep);
        icss_iep_settime(iep, ktime_get_real_ns());

        iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
        if (IS_ERR(iep->ptp_clock)) {
                ret = PTR_ERR(iep->ptp_clock);
                iep->ptp_clock = NULL;
                dev_err(iep->dev, "Failed to register ptp clk %d\n", ret);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(icss_iep_init);

int icss_iep_exit(struct icss_iep *iep)
{
        if (iep->ptp_clock) {
                ptp_clock_unregister(iep->ptp_clock);
                iep->ptp_clock = NULL;
        }
        icss_iep_disable(iep);

        return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);

static int icss_iep_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct icss_iep *iep;
        struct clk *iep_clk;
        int ret, irq;

        iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
        if (!iep)
                return -ENOMEM;

        iep->dev = dev;
        iep->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(iep->base))
                return -ENODEV;

        irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp");
        if (irq == -EPROBE_DEFER)
                return irq;

        if (irq > 0) {
                ret = devm_request_irq(dev, irq, icss_iep_cap_cmp_irq,
                                       IRQF_TRIGGER_HIGH, "iep_cap_cmp", iep);
                if (ret) {
                        dev_info(iep->dev, "cap_cmp irq request failed: %d\n",
                                 ret);
                } else {
                        iep->cap_cmp_irq = irq;
                        INIT_WORK(&iep->work, icss_iep_cap_cmp_work);
                }
        }

        iep_clk = devm_clk_get(dev, NULL);
        if (IS_ERR(iep_clk))
                return PTR_ERR(iep_clk);

        iep->refclk_freq = clk_get_rate(iep_clk);
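        /* For example, a 200 MHz refclk gives def_inc = 5 ns and a 250 MHz
         * refclk gives 4 ns; anything below ~66.7 MHz would need an
         * increment larger than IEP_MAX_DEF_INC (0xf ns) and is rejected
         * below.
         */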
        iep->def_inc = NSEC_PER_SEC / iep->refclk_freq;	/* ns per clock tick */
        if (iep->def_inc > IEP_MAX_DEF_INC) {
                dev_err(dev, "Failed to set def_inc %d. IEP clock is too slow to be supported\n",
                        iep->def_inc);
                return -EINVAL;
        }

        iep->plat_data = device_get_match_data(dev);
        if (!iep->plat_data)
                return -EINVAL;

        iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
        if (IS_ERR(iep->map)) {
                dev_err(dev, "Failed to create regmap for IEP %ld\n",
                        PTR_ERR(iep->map));
                return PTR_ERR(iep->map);
        }

        iep->ptp_info = icss_iep_ptp_info;
        mutex_init(&iep->ptp_clk_mutex);
        dev_set_drvdata(dev, iep);
        icss_iep_disable(iep);

        return 0;
}

static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
{
        switch (reg) {
        case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
                return true;
        default:
                return false;
        }
}

static int icss_iep_regmap_write(void *context, unsigned int reg,
                                 unsigned int val)
{
        struct icss_iep *iep = context;

        writel(val, iep->base + iep->plat_data->reg_offs[reg]);

        return 0;
}

static int icss_iep_regmap_read(void *context, unsigned int reg,
                                unsigned int *val)
{
        struct icss_iep *iep = context;

        *val = readl(iep->base + iep->plat_data->reg_offs[reg]);

        return 0;
}

static const struct regmap_config am654_icss_iep_regmap_config = {
        .name = "icss iep",
        .reg_stride = 1,
        .reg_write = icss_iep_regmap_write,
        .reg_read = icss_iep_regmap_read,
        .writeable_reg = am654_icss_iep_valid_reg,
        .readable_reg = am654_icss_iep_valid_reg,
        .fast_io = 1,
};

static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
        .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
                 ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
                 ICSS_IEP_SHADOW_MODE_SUPPORT,
        .reg_offs = {
                [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
                [ICSS_IEP_COMPEN_REG] = 0x08,
                [ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
                [ICSS_IEP_COUNT_REG0] = 0x10,
                [ICSS_IEP_COUNT_REG1] = 0x14,
                [ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
                [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,

                [ICSS_IEP_CAP6_RISE_REG0] = 0x50,
                [ICSS_IEP_CAP6_RISE_REG1] = 0x54,

                [ICSS_IEP_CAP7_RISE_REG0] = 0x60,
                [ICSS_IEP_CAP7_RISE_REG1] = 0x64,

                [ICSS_IEP_CMP_CFG_REG] = 0x70,
                [ICSS_IEP_CMP_STAT_REG] = 0x74,
                [ICSS_IEP_CMP0_REG0] = 0x78,
                [ICSS_IEP_CMP0_REG1] = 0x7c,
                [ICSS_IEP_CMP1_REG0] = 0x80,
                [ICSS_IEP_CMP1_REG1] = 0x84,

                [ICSS_IEP_CMP8_REG0] = 0xc0,
                [ICSS_IEP_CMP8_REG1] = 0xc4,
                [ICSS_IEP_SYNC_CTRL_REG] = 0x180,
                [ICSS_IEP_SYNC0_STAT_REG] = 0x188,
                [ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
                [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
                [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
                [ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
                [ICSS_IEP_SYNC_START_REG] = 0x19c,
        },
        .config = &am654_icss_iep_regmap_config,
};

static const struct of_device_id icss_iep_of_match[] = {
        {
                .compatible = "ti,am654-icss-iep",
                .data = &am654_icss_iep_plat_data,
        },
        {},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);

static struct platform_driver icss_iep_driver = {
        .driver = {
                .name = "icss-iep",
                .of_match_table = icss_iep_of_match,
        },
        .probe = icss_iep_probe,
};
module_platform_driver(icss_iep_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI ICSS IEP driver");
MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");