1 // SPDX-License-Identifier: GPL-2.0
2
3 /* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
4 *
5 * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
6 *
7 */
8
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/err.h>
12 #include <linux/io.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_platform.h>
16 #include <linux/platform_device.h>
17 #include <linux/timekeeping.h>
18 #include <linux/interrupt.h>
19 #include <linux/of_irq.h>
20 #include <linux/workqueue.h>
21
22 #include "icss_iep.h"
23
24 #define IEP_MAX_DEF_INC 0xf
25 #define IEP_MAX_COMPEN_INC 0xfff
26 #define IEP_MAX_COMPEN_COUNT 0xffffff
27
28 #define IEP_GLOBAL_CFG_CNT_ENABLE BIT(0)
29 #define IEP_GLOBAL_CFG_DEFAULT_INC_MASK GENMASK(7, 4)
30 #define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT 4
31 #define IEP_GLOBAL_CFG_COMPEN_INC_MASK GENMASK(19, 8)
32 #define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT 8
33
34 #define IEP_GLOBAL_STATUS_CNT_OVF BIT(0)
35
36 #define IEP_CMP_CFG_SHADOW_EN BIT(17)
37 #define IEP_CMP_CFG_CMP0_RST_CNT_EN BIT(0)
38 #define IEP_CMP_CFG_CMP_EN(cmp) (GENMASK(16, 1) & (1 << ((cmp) + 1)))
39
40 #define IEP_CMP_STATUS(cmp) (1 << (cmp))
41
42 #define IEP_SYNC_CTRL_SYNC_EN BIT(0)
43 #define IEP_SYNC_CTRL_SYNC_N_EN(n) (GENMASK(2, 1) & (BIT(1) << (n)))
44
45 #define IEP_MIN_CMP 0
46 #define IEP_MAX_CMP 15
47
48 #define ICSS_IEP_64BIT_COUNTER_SUPPORT BIT(0)
49 #define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT BIT(1)
50 #define ICSS_IEP_SHADOW_MODE_SUPPORT BIT(2)
51
52 #define LATCH_INDEX(ts_index) ((ts_index) + 6)
53 #define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n) BIT(LATCH_INDEX(n))
54 #define IEP_CAP_CFG_CAP_ASYNC_EN(n) BIT(LATCH_INDEX(n) + 10)
55
56 /**
57 * icss_iep_get_count_hi() - Get the upper 32 bit IEP counter
58 * @iep: Pointer to structure representing IEP.
59 *
60 * Return: upper 32 bit IEP counter
61 */
icss_iep_get_count_hi(struct icss_iep * iep)62 int icss_iep_get_count_hi(struct icss_iep *iep)
63 {
64 u32 val = 0;
65
66 if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
67 val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
68
69 return val;
70 }
71 EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);
72
73 /**
74 * icss_iep_get_count_low() - Get the lower 32 bit IEP counter
75 * @iep: Pointer to structure representing IEP.
76 *
77 * Return: lower 32 bit IEP counter
78 */
icss_iep_get_count_low(struct icss_iep * iep)79 int icss_iep_get_count_low(struct icss_iep *iep)
80 {
81 u32 val = 0;
82
83 if (iep)
84 val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
85
86 return val;
87 }
88 EXPORT_SYMBOL_GPL(icss_iep_get_count_low);
89
90 /**
91 * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
92 * @iep: Pointer to structure representing IEP.
93 *
94 * Return: PTP clock index, -1 if not registered
95 */
icss_iep_get_ptp_clock_idx(struct icss_iep * iep)96 int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
97 {
98 if (!iep || !iep->ptp_clock)
99 return -1;
100 return ptp_clock_index(iep->ptp_clock);
101 }
102 EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);
103
/* Load a raw counter value into the IEP count register(s) */
static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
{
	const u32 *offs = iep->plat_data->reg_offs;

	/* Write the high word first where a 64-bit counter is present */
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		writel(upper_32_bits(ns), iep->base + offs[ICSS_IEP_COUNT_REG1]);
	writel(lower_32_bits(ns), iep->base + offs[ICSS_IEP_COUNT_REG0]);
}
111
112 static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
113
/**
 * icss_iep_settime() - Set time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @ns: Time to be set in nanoseconds
 *
 * This API uses writel() instead of regmap_write() for write operations as
 * regmap_write() is too slow and this API is time sensitive.
 */
static void icss_iep_settime(struct icss_iep *iep, u64 ns)
{
	/* Firmware-assisted mode: delegate to the client's settime hook */
	if (iep->ops && iep->ops->settime) {
		iep->ops->settime(iep->clockops_data, ns);
		return;
	}

	/* Quiesce the SYNC output while the counter jumps so no spurious
	 * pulse is generated mid-update.
	 */
	if (iep->pps_enabled || iep->perout_enabled)
		writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);

	icss_iep_set_counter(iep, ns);

	if (iep->pps_enabled || iep->perout_enabled) {
		/* Re-arm CMP1 at the next period boundary on the new
		 * timescale, then re-enable SYNC0 output.
		 */
		icss_iep_update_to_next_boundary(iep, ns);
		writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
		       iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
	}
}
140
/**
 * icss_iep_gettime() - Get time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @sts: Pointer to structure representing PTP system timestamp.
 *
 * This API uses readl() instead of regmap_read() for read operations as
 * regmap_read() is too slow and this API is time sensitive.
 *
 * Return: The current timestamp of the PTP clock using IEP driver
 */
static u64 icss_iep_gettime(struct icss_iep *iep,
			    struct ptp_system_timestamp *sts)
{
	u32 ts_hi = 0, ts_lo;
	unsigned long flags;

	/* Firmware-assisted mode: delegate to the client's gettime hook */
	if (iep->ops && iep->ops->gettime)
		return iep->ops->gettime(iep->clockops_data, sts);

	/* use local_irq_x() to make it work for both RT/non-RT */
	local_irq_save(flags);

	/* no need to play with hi-lo, hi is latched when lo is read */
	ptp_read_system_prets(sts);
	ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
	ptp_read_system_postts(sts);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);

	local_irq_restore(flags);

	/* Compose the 64-bit timestamp; ts_hi stays 0 on 32-bit-only IPs */
	return (u64)ts_lo | (u64)ts_hi << 32;
}
174
icss_iep_enable(struct icss_iep * iep)175 static void icss_iep_enable(struct icss_iep *iep)
176 {
177 regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
178 IEP_GLOBAL_CFG_CNT_ENABLE,
179 IEP_GLOBAL_CFG_CNT_ENABLE);
180 }
181
icss_iep_disable(struct icss_iep * iep)182 static void icss_iep_disable(struct icss_iep *iep)
183 {
184 regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
185 IEP_GLOBAL_CFG_CNT_ENABLE,
186 0);
187 }
188
/* Configure the IEP for shadow-mode operation: the counter is reset on
 * every CMP0 hit so it wraps at iep->cycle_time_ns. All CMP events and
 * stale status are cleared before the counter is restarted.
 */
static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
{
	u32 cycle_time;
	int cmp;

	/* CMP0 fires when count == value, so program one increment less
	 * than the full cycle time.
	 */
	cycle_time = iep->cycle_time_ns - iep->def_inc;

	icss_iep_disable(iep);

	/* disable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, 0);

	/* enable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);

	/* clear counters */
	icss_iep_set_counter(iep, 0);

	/* clear overflow status (write-1-to-clear) */
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
			   IEP_GLOBAL_STATUS_CNT_OVF,
			   IEP_GLOBAL_STATUS_CNT_OVF);

	/* clear compare status and disable all compare events */
	for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
		regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
				   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));

		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(cmp), 0);
	}

	/* enable reset counter on CMP0 event */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN);
	/* enable compare */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP_EN(0),
			   IEP_CMP_CFG_CMP_EN(0));

	/* set CMP0 value to cycle time */
	regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
	/* NOTE(review): the same 32-bit cycle_time is written to the upper
	 * CMP0 word on 64-bit-capable IPs — confirm this is the intended
	 * value for CMP0_REG1 rather than 0.
	 */
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);

	icss_iep_set_counter(iep, 0);
	icss_iep_enable(iep);
}
240
/* Program the nanoseconds added to the counter on every refclk tick */
static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
{
	u32 val = (u32)def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT;

	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_DEFAULT_INC_MASK, val);
}
247
/* Program the increment applied on compensation cycles, clamped to the
 * 12-bit COMPEN_INC field.
 */
static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
{
	struct device *dev = regmap_get_device(iep->map);
	u32 val;

	if (compen_inc > IEP_MAX_COMPEN_INC) {
		dev_err(dev, "%s: too high compensation inc %d\n",
			__func__, compen_inc);
		compen_inc = IEP_MAX_COMPEN_INC;
	}

	val = (u32)compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT;
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_COMPEN_INC_MASK, val);
}
262
/* Program the one-shot compensation count, clamped to the 24-bit COMPEN
 * field. compen_count is unsigned, so log it with %u (the old %d printed
 * values above INT_MAX as negative).
 */
static void icss_iep_set_compensation_count(struct icss_iep *iep,
					    u32 compen_count)
{
	struct device *dev = regmap_get_device(iep->map);

	if (compen_count > IEP_MAX_COMPEN_COUNT) {
		dev_err(dev, "%s: too high compensation count %u\n",
			__func__, compen_count);
		compen_count = IEP_MAX_COMPEN_COUNT;
	}

	regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
}
276
/* Program the number of clock cycles between two slow compensation events */
static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
						 u32 compen_count)
{
	struct regmap *map = iep->map;

	regmap_write(map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
}
282
283 /* PTP PHC operations */
icss_iep_ptp_adjfine(struct ptp_clock_info * ptp,long scaled_ppm)284 static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
285 {
286 struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
287 s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
288 u32 cyc_count;
289 u16 cmp_inc;
290
291 mutex_lock(&iep->ptp_clk_mutex);
292
293 /* ppb is amount of frequency we want to adjust in 1GHz (billion)
294 * e.g. 100ppb means we need to speed up clock by 100Hz
295 * i.e. at end of 1 second (1 billion ns) clock time, we should be
296 * counting 100 more ns.
297 * We use IEP slow compensation to achieve continuous freq. adjustment.
298 * There are 2 parts. Cycle time and adjustment per cycle.
299 * Simplest case would be 1 sec Cycle time. Then adjustment
300 * pre cycle would be (def_inc + ppb) value.
301 * Cycle time will have to be chosen based on how worse the ppb is.
302 * e.g. smaller the ppb, cycle time has to be large.
303 * The minimum adjustment we can do is +-1ns per cycle so let's
304 * reduce the cycle time to get 1ns per cycle adjustment.
305 * 1ppb = 1sec cycle time & 1ns adjust
306 * 1000ppb = 1/1000 cycle time & 1ns adjust per cycle
307 */
308
309 if (iep->cycle_time_ns)
310 iep->slow_cmp_inc = iep->clk_tick_time; /* 4ns adj per cycle */
311 else
312 iep->slow_cmp_inc = 1; /* 1ns adjust per cycle */
313
314 if (ppb < 0) {
315 iep->slow_cmp_inc = -iep->slow_cmp_inc;
316 ppb = -ppb;
317 }
318
319 cyc_count = NSEC_PER_SEC; /* 1s cycle time @1GHz */
320 cyc_count /= ppb; /* cycle time per ppb */
321
322 /* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
323 if (!iep->cycle_time_ns)
324 cyc_count /= iep->clk_tick_time;
325 iep->slow_cmp_count = cyc_count;
326
327 /* iep->clk_tick_time is def_inc */
328 cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
329 icss_iep_set_compensation_inc(iep, cmp_inc);
330 icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);
331
332 mutex_unlock(&iep->ptp_clk_mutex);
333
334 return 0;
335 }
336
/* Shift the PHC by @delta ns: delegate to clockops when present,
 * otherwise read-modify-write the counter directly.
 */
static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);

	mutex_lock(&iep->ptp_clk_mutex);
	if (iep->ops && iep->ops->adjtime) {
		iep->ops->adjtime(iep->clockops_data, delta);
	} else {
		s64 now = icss_iep_gettime(iep, NULL);

		icss_iep_settime(iep, now + delta);
	}
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}
354
icss_iep_ptp_gettimeex(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)355 static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
356 struct timespec64 *ts,
357 struct ptp_system_timestamp *sts)
358 {
359 struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
360 u64 ns;
361
362 mutex_lock(&iep->ptp_clk_mutex);
363 ns = icss_iep_gettime(iep, sts);
364 *ts = ns_to_timespec64(ns);
365 mutex_unlock(&iep->ptp_clk_mutex);
366
367 return 0;
368 }
369
icss_iep_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)370 static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
371 const struct timespec64 *ts)
372 {
373 struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
374 u64 ns;
375
376 mutex_lock(&iep->ptp_clk_mutex);
377 ns = timespec64_to_ns(ts);
378 icss_iep_settime(iep, ns);
379 mutex_unlock(&iep->ptp_clk_mutex);
380
381 return 0;
382 }
383
/* Program CMP1 to the next multiple of iep->period at or after @start_ns,
 * never in the past relative to the current counter value.
 */
static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
{
	u64 ns, p_ns;
	u32 offset;

	ns = icss_iep_gettime(iep, NULL);
	if (start_ns < ns)
		start_ns = ns;
	p_ns = iep->period;
	/* Round up to next period boundary */
	start_ns += p_ns - 1;
	/* do_div() leaves the quotient in start_ns and returns the remainder */
	offset = do_div(start_ns, p_ns);
	start_ns = start_ns * p_ns;
	/* If it is too close to update, shift to next boundary */
	if (p_ns - offset < 10)
		start_ns += p_ns;

	regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
}
405
icss_iep_perout_enable_hw(struct icss_iep * iep,struct ptp_perout_request * req,int on)406 static int icss_iep_perout_enable_hw(struct icss_iep *iep,
407 struct ptp_perout_request *req, int on)
408 {
409 struct timespec64 ts;
410 u64 ns_start;
411 u64 ns_width;
412 int ret;
413 u64 cmp;
414
415 /* Calculate width of the signal for PPS/PEROUT handling */
416 ts.tv_sec = req->on.sec;
417 ts.tv_nsec = req->on.nsec;
418 ns_width = timespec64_to_ns(&ts);
419
420 if (req->flags & PTP_PEROUT_PHASE) {
421 ts.tv_sec = req->phase.sec;
422 ts.tv_nsec = req->phase.nsec;
423 ns_start = timespec64_to_ns(&ts);
424 } else {
425 ns_start = 0;
426 }
427
428 if (iep->ops && iep->ops->perout_enable) {
429 ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
430 if (ret)
431 return ret;
432
433 if (on) {
434 /* Configure CMP */
435 regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
436 if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
437 regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
438 /* Configure SYNC, based on req on width */
439 regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
440 div_u64(ns_width, iep->def_inc));
441 regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
442 regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
443 div_u64(ns_start, iep->def_inc));
444 regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
445 /* Enable CMP 1 */
446 regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
447 IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
448 } else {
449 /* Disable CMP 1 */
450 regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
451 IEP_CMP_CFG_CMP_EN(1), 0);
452
453 /* clear regs */
454 regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
455 if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
456 regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
457 }
458 } else {
459 if (on) {
460 u64 start_ns;
461
462 iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
463 req->period.nsec;
464 start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
465 + req->period.nsec;
466 icss_iep_update_to_next_boundary(iep, start_ns);
467
468 regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
469 div_u64(ns_width, iep->def_inc));
470 regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
471 div_u64(ns_start, iep->def_inc));
472 /* Enable Sync in single shot mode */
473 regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
474 IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
475 /* Enable CMP 1 */
476 regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
477 IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
478 } else {
479 /* Disable CMP 1 */
480 regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
481 IEP_CMP_CFG_CMP_EN(1), 0);
482
483 /* clear CMP regs */
484 regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
485 if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
486 regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
487
488 /* Disable sync */
489 regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
490 }
491 }
492
493 return 0;
494 }
495
icss_iep_perout_enable(struct icss_iep * iep,struct ptp_perout_request * req,int on)496 static int icss_iep_perout_enable(struct icss_iep *iep,
497 struct ptp_perout_request *req, int on)
498 {
499 int ret = 0;
500
501 /* Reject requests with unsupported flags */
502 if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
503 PTP_PEROUT_PHASE))
504 return -EOPNOTSUPP;
505
506 mutex_lock(&iep->ptp_clk_mutex);
507
508 if (iep->pps_enabled) {
509 ret = -EBUSY;
510 goto exit;
511 }
512
513 if (iep->perout_enabled == !!on)
514 goto exit;
515
516 /* Set default "on" time (1ms) for the signal if not passed by the app */
517 if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
518 req->on.sec = 0;
519 req->on.nsec = NSEC_PER_MSEC;
520 }
521
522 ret = icss_iep_perout_enable_hw(iep, req, on);
523 if (!ret)
524 iep->perout_enabled = !!on;
525
526 exit:
527 mutex_unlock(&iep->ptp_clk_mutex);
528
529 return ret;
530 }
531
/* Deferred handler for the CMP1 (PPS) interrupt: report the event to the
 * PTP core and re-arm CMP1 one period ahead.
 */
static void icss_iep_cap_cmp_work(struct work_struct *work)
{
	struct icss_iep *iep = container_of(work, struct icss_iep, work);
	const u32 *reg_offs = iep->plat_data->reg_offs;
	struct ptp_clock_event pevent;
	unsigned int val;
	u64 ns, ns_next;

	mutex_lock(&iep->ptp_clk_mutex);

	/* The event timestamp is the CMP1 value that just matched */
	ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) {
		val = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
		ns |= (u64)val << 32;
	}
	/* set next event */
	ns_next = ns + iep->period;
	writel(lower_32_bits(ns_next),
	       iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		writel(upper_32_bits(ns_next),
		       iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);

	/* Deliver the PPS event to the PTP core */
	pevent.pps_times.ts_real = ns_to_timespec64(ns);
	pevent.type = PTP_CLOCK_PPSUSR;
	pevent.index = 0;
	ptp_clock_event(iep->ptp_clock, &pevent);
	dev_dbg(iep->dev, "IEP:pps ts: %llu next:%llu:\n", ns, ns_next);

	mutex_unlock(&iep->ptp_clk_mutex);
}
563
icss_iep_cap_cmp_irq(int irq,void * dev_id)564 static irqreturn_t icss_iep_cap_cmp_irq(int irq, void *dev_id)
565 {
566 struct icss_iep *iep = (struct icss_iep *)dev_id;
567 const u32 *reg_offs = iep->plat_data->reg_offs;
568 unsigned int val;
569
570 val = readl(iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
571 /* The driver only enables CMP1 */
572 if (val & BIT(1)) {
573 /* Clear the event */
574 writel(BIT(1), iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
575 if (iep->pps_enabled || iep->perout_enabled)
576 schedule_work(&iep->work);
577 return IRQ_HANDLED;
578 }
579
580 return IRQ_NONE;
581 }
582
icss_iep_pps_enable(struct icss_iep * iep,int on)583 static int icss_iep_pps_enable(struct icss_iep *iep, int on)
584 {
585 struct ptp_clock_request rq;
586 struct timespec64 ts;
587 int ret = 0;
588 u64 ns;
589
590 mutex_lock(&iep->ptp_clk_mutex);
591
592 if (iep->perout_enabled) {
593 ret = -EBUSY;
594 goto exit;
595 }
596
597 if (iep->pps_enabled == !!on)
598 goto exit;
599
600 rq.perout.index = 0;
601 if (on) {
602 ns = icss_iep_gettime(iep, NULL);
603 ts = ns_to_timespec64(ns);
604 rq.perout.flags = 0;
605 rq.perout.period.sec = 1;
606 rq.perout.period.nsec = 0;
607 rq.perout.start.sec = ts.tv_sec + 2;
608 rq.perout.start.nsec = 0;
609 rq.perout.on.sec = 0;
610 rq.perout.on.nsec = NSEC_PER_MSEC;
611 ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
612 } else {
613 ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
614 if (iep->cap_cmp_irq)
615 cancel_work_sync(&iep->work);
616 }
617
618 if (!ret)
619 iep->pps_enabled = !!on;
620
621 exit:
622 mutex_unlock(&iep->ptp_clk_mutex);
623
624 return ret;
625 }
626
/**
 * icss_iep_extts_enable() - Enable/disable external timestamp capture
 * @iep: Pointer to structure representing IEP.
 * @index: latch (capture) input index
 * @on: non-zero to enable, zero to disable
 *
 * Fix: @ret was declared u32 although it stores negative error codes
 * returned by the clockops extts_enable hook; it is now a plain int.
 *
 * Return: 0 on success or a negative error from the clockops hook.
 */
static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
	u32 val, cap;
	int ret = 0;

	mutex_lock(&iep->ptp_clk_mutex);

	/* Firmware-assisted mode: delegate to the client's hook */
	if (iep->ops && iep->ops->extts_enable) {
		ret = iep->ops->extts_enable(iep->clockops_data, index, on);
		goto exit;
	}

	/* Nothing to do if the latch is already in the requested state */
	if (((iep->latch_enable & BIT(index)) >> index) == on)
		goto exit;

	regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
	cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
	if (on) {
		val |= cap;
		iep->latch_enable |= BIT(index);
	} else {
		val &= ~cap;
		iep->latch_enable &= ~BIT(index);
	}
	regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}
657
icss_iep_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)658 static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
659 struct ptp_clock_request *rq, int on)
660 {
661 struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
662
663 switch (rq->type) {
664 case PTP_CLK_REQ_PEROUT:
665 return icss_iep_perout_enable(iep, &rq->perout, on);
666 case PTP_CLK_REQ_PPS:
667 return icss_iep_pps_enable(iep, on);
668 case PTP_CLK_REQ_EXTTS:
669 return icss_iep_extts_enable(iep, rq->extts.index, on);
670 default:
671 break;
672 }
673
674 return -EOPNOTSUPP;
675 }
676
/* Template PTP clock operations. Copied into each IEP instance in probe
 * (iep->ptp_info) so per-instance capabilities (pps, n_per_out, n_ext_ts)
 * can be adjusted in icss_iep_init() before registration.
 */
static struct ptp_clock_info icss_iep_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "ICSS IEP timer",
	.max_adj	= 10000000,
	.adjfine	= icss_iep_ptp_adjfine,
	.adjtime	= icss_iep_ptp_adjtime,
	.gettimex64	= icss_iep_ptp_gettimeex,
	.settime64	= icss_iep_ptp_settime,
	.enable		= icss_iep_ptp_enable,
};
687
icss_iep_get_idx(struct device_node * np,int idx)688 struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
689 {
690 struct platform_device *pdev;
691 struct device_node *iep_np;
692 struct icss_iep *iep;
693
694 iep_np = of_parse_phandle(np, "ti,iep", idx);
695 if (!iep_np || !of_device_is_available(iep_np))
696 return ERR_PTR(-ENODEV);
697
698 pdev = of_find_device_by_node(iep_np);
699 of_node_put(iep_np);
700
701 if (!pdev)
702 /* probably IEP not yet probed */
703 return ERR_PTR(-EPROBE_DEFER);
704
705 iep = platform_get_drvdata(pdev);
706 if (!iep)
707 return ERR_PTR(-EPROBE_DEFER);
708
709 device_lock(iep->dev);
710 if (iep->client_np) {
711 device_unlock(iep->dev);
712 dev_err(iep->dev, "IEP is already acquired by %s",
713 iep->client_np->name);
714 return ERR_PTR(-EBUSY);
715 }
716 iep->client_np = np;
717 device_unlock(iep->dev);
718 get_device(iep->dev);
719
720 return iep;
721 }
722 EXPORT_SYMBOL_GPL(icss_iep_get_idx);
723
/**
 * icss_iep_get() - Acquire the first IEP instance referenced by @np
 * @np: client device node carrying a "ti,iep" phandle list
 *
 * Return: IEP handle on success, ERR_PTR on failure (see icss_iep_get_idx()).
 */
struct icss_iep *icss_iep_get(struct device_node *np)
{
	return icss_iep_get_idx(np, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_get);
729
/**
 * icss_iep_put() - Release an IEP acquired with icss_iep_get()/_get_idx()
 * @iep: Pointer to structure representing IEP.
 *
 * Clears the exclusive client binding and drops the device reference
 * taken at acquisition time.
 */
void icss_iep_put(struct icss_iep *iep)
{
	device_lock(iep->dev);
	iep->client_np = NULL;
	device_unlock(iep->dev);
	put_device(iep->dev);
}
EXPORT_SYMBOL_GPL(icss_iep_put);
738
/**
 * icss_iep_init_fw() - Start the IEP for firmware use (no PTP clock)
 * @iep: Pointer to structure representing IEP.
 *
 * Programs nominal increments, clears compensation, and starts the
 * counter from zero in raw 64-bit mode.
 */
void icss_iep_init_fw(struct icss_iep *iep)
{
	/* start IEP for FW use in raw 64bit mode, no PTP support */
	iep->clk_tick_time = iep->def_inc;
	iep->cycle_time_ns = 0;
	iep->ops = NULL;
	iep->clockops_data = NULL;
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	icss_iep_enable(iep);
	icss_iep_settime(iep, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_init_fw);
758
/**
 * icss_iep_exit_fw() - Stop an IEP started with icss_iep_init_fw()
 * @iep: Pointer to structure representing IEP.
 */
void icss_iep_exit_fw(struct icss_iep *iep)
{
	icss_iep_disable(iep);
}
EXPORT_SYMBOL_GPL(icss_iep_exit_fw);
764
/**
 * icss_iep_init() - Initialize the IEP and register a PTP clock
 * @iep: Pointer to structure representing IEP.
 * @clkops: optional clock operations for firmware-assisted mode
 * @clockops_data: opaque cookie passed to @clkops callbacks
 * @cycle_time_ns: IEP cycle time; non-zero selects shadow mode
 *
 * Return: 0 on success, negative error code if PTP clock registration
 * fails (the IEP itself is left running).
 */
int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
		  void *clockops_data, u32 cycle_time_ns)
{
	int ret = 0;

	iep->cycle_time_ns = cycle_time_ns;
	iep->clk_tick_time = iep->def_inc;
	iep->ops = clkops;
	iep->clockops_data = clockops_data;
	/* Nominal increments, no compensation to begin with */
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	/* PEROUT/PPS need both the 64-bit counter and slow compensation */
	if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
	    !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
		goto skip_perout;

	if (iep->ops && iep->ops->perout_enable) {
		iep->ptp_info.n_per_out = 1;
		iep->ptp_info.pps = 1;
	} else if (iep->cap_cmp_irq) {
		/* PPS events are delivered via the cap_cmp interrupt */
		iep->ptp_info.pps = 1;
	}

	if (iep->ops && iep->ops->extts_enable)
		iep->ptp_info.n_ext_ts = 2;

skip_perout:
	if (cycle_time_ns)
		icss_iep_enable_shadow_mode(iep);
	else
		icss_iep_enable(iep);
	icss_iep_settime(iep, ktime_get_real_ns());

	iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
	if (IS_ERR(iep->ptp_clock)) {
		ret = PTR_ERR(iep->ptp_clock);
		iep->ptp_clock = NULL;
		dev_err(iep->dev, "Failed to register ptp clk %d\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(icss_iep_init);
813
/**
 * icss_iep_exit() - Tear down state set up by icss_iep_init()
 * @iep: Pointer to structure representing IEP.
 *
 * Return: 0 (always succeeds)
 */
int icss_iep_exit(struct icss_iep *iep)
{
	if (iep->ptp_clock) {
		ptp_clock_unregister(iep->ptp_clock);
		iep->ptp_clock = NULL;
	}
	icss_iep_disable(iep);

	/* NOTE(review): icss_iep_perout_enable() reads req->flags, so the
	 * NULL request on the disable path below can dereference NULL —
	 * confirm against the perout implementation.
	 */
	if (iep->pps_enabled)
		icss_iep_pps_enable(iep, false);
	else if (iep->perout_enabled)
		icss_iep_perout_enable(iep, NULL, false);

	return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);
830
icss_iep_probe(struct platform_device * pdev)831 static int icss_iep_probe(struct platform_device *pdev)
832 {
833 struct device *dev = &pdev->dev;
834 struct icss_iep *iep;
835 struct clk *iep_clk;
836 int ret, irq;
837
838 iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
839 if (!iep)
840 return -ENOMEM;
841
842 iep->dev = dev;
843 iep->base = devm_platform_ioremap_resource(pdev, 0);
844 if (IS_ERR(iep->base))
845 return -ENODEV;
846
847 irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp");
848 if (irq == -EPROBE_DEFER)
849 return irq;
850
851 if (irq > 0) {
852 ret = devm_request_irq(dev, irq, icss_iep_cap_cmp_irq,
853 IRQF_TRIGGER_HIGH, "iep_cap_cmp", iep);
854 if (ret) {
855 dev_info(iep->dev, "cap_cmp irq request failed: %x\n",
856 ret);
857 } else {
858 iep->cap_cmp_irq = irq;
859 INIT_WORK(&iep->work, icss_iep_cap_cmp_work);
860 }
861 }
862
863 iep_clk = devm_clk_get(dev, NULL);
864 if (IS_ERR(iep_clk))
865 return PTR_ERR(iep_clk);
866
867 iep->refclk_freq = clk_get_rate(iep_clk);
868
869 iep->def_inc = NSEC_PER_SEC / iep->refclk_freq; /* ns per clock tick */
870 if (iep->def_inc > IEP_MAX_DEF_INC) {
871 dev_err(dev, "Failed to set def_inc %d. IEP_clock is too slow to be supported\n",
872 iep->def_inc);
873 return -EINVAL;
874 }
875
876 iep->plat_data = device_get_match_data(dev);
877 if (!iep->plat_data)
878 return -EINVAL;
879
880 iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
881 if (IS_ERR(iep->map)) {
882 dev_err(dev, "Failed to create regmap for IEP %ld\n",
883 PTR_ERR(iep->map));
884 return PTR_ERR(iep->map);
885 }
886
887 iep->ptp_info = icss_iep_ptp_info;
888 mutex_init(&iep->ptp_clk_mutex);
889 dev_set_drvdata(dev, iep);
890 icss_iep_disable(iep);
891
892 return 0;
893 }
894
am654_icss_iep_valid_reg(struct device * dev,unsigned int reg)895 static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
896 {
897 switch (reg) {
898 case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
899 return true;
900 default:
901 return false;
902 }
903
904 return false;
905 }
906
icss_iep_regmap_write(void * context,unsigned int reg,unsigned int val)907 static int icss_iep_regmap_write(void *context, unsigned int reg,
908 unsigned int val)
909 {
910 struct icss_iep *iep = context;
911
912 writel(val, iep->base + iep->plat_data->reg_offs[reg]);
913
914 return 0;
915 }
916
icss_iep_regmap_read(void * context,unsigned int reg,unsigned int * val)917 static int icss_iep_regmap_read(void *context, unsigned int reg,
918 unsigned int *val)
919 {
920 struct icss_iep *iep = context;
921
922 *val = readl(iep->base + iep->plat_data->reg_offs[reg]);
923
924 return 0;
925 }
926
/* regmap over MMIO with custom accessors so the per-SoC reg_offs table
 * can translate virtual register indices to hardware offsets.
 */
static const struct regmap_config am654_icss_iep_regmap_config = {
	.name = "icss iep",
	.reg_stride = 1,
	.reg_write = icss_iep_regmap_write,
	.reg_read = icss_iep_regmap_read,
	.writeable_reg = am654_icss_iep_valid_reg,
	.readable_reg = am654_icss_iep_valid_reg,
	.fast_io = 1,
};
936
/* AM654-family IEP: 64-bit counter, slow compensation and shadow mode.
 * reg_offs maps the driver's virtual register indices to MMIO offsets.
 */
static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
	.flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
		 ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
		 ICSS_IEP_SHADOW_MODE_SUPPORT,
	.reg_offs = {
		[ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
		[ICSS_IEP_COMPEN_REG] = 0x08,
		[ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
		[ICSS_IEP_COUNT_REG0] = 0x10,
		[ICSS_IEP_COUNT_REG1] = 0x14,
		[ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
		[ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,

		[ICSS_IEP_CAP6_RISE_REG0] = 0x50,
		[ICSS_IEP_CAP6_RISE_REG1] = 0x54,

		[ICSS_IEP_CAP7_RISE_REG0] = 0x60,
		[ICSS_IEP_CAP7_RISE_REG1] = 0x64,

		[ICSS_IEP_CMP_CFG_REG] = 0x70,
		[ICSS_IEP_CMP_STAT_REG] = 0x74,
		[ICSS_IEP_CMP0_REG0] = 0x78,
		[ICSS_IEP_CMP0_REG1] = 0x7c,
		[ICSS_IEP_CMP1_REG0] = 0x80,
		[ICSS_IEP_CMP1_REG1] = 0x84,

		[ICSS_IEP_CMP8_REG0] = 0xc0,
		[ICSS_IEP_CMP8_REG1] = 0xc4,
		[ICSS_IEP_SYNC_CTRL_REG] = 0x180,
		[ICSS_IEP_SYNC0_STAT_REG] = 0x188,
		[ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
		[ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
		[ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
		[ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
		[ICSS_IEP_SYNC_START_REG] = 0x19c,
	},
	.config = &am654_icss_iep_regmap_config,
};
975
/* Device tree match table; .data selects the per-SoC platform data */
static const struct of_device_id icss_iep_of_match[] = {
	{
		.compatible = "ti,am654-icss-iep",
		.data = &am654_icss_iep_plat_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);
984
/* Platform driver; no .remove — all resources are devm-managed */
static struct platform_driver icss_iep_driver = {
	.driver = {
		.name = "icss-iep",
		.of_match_table = icss_iep_of_match,
	},
	.probe = icss_iep_probe,
};
module_platform_driver(icss_iep_driver);
993
994 MODULE_LICENSE("GPL");
995 MODULE_DESCRIPTION("TI ICSS IEP driver");
996 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
997 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
998