// SPDX-License-Identifier: GPL-2.0
/* TI K3 AM65x Common Platform Time Sync
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>

#include "am65-cpts.h"

struct am65_genf_regs {
	u32 comp_lo;	/* Comparison Low Value 0:31 */
	u32 comp_hi;	/* Comparison High Value 32:63 */
	u32 control;	/* control */
	u32 length;	/* Length */
	u32 ppm_low;	/* PPM Load Low Value 0:31 */
	u32 ppm_hi;	/* PPM Load High Value 32:63 */
	u32 ts_nudge;	/* Nudge value */
} __aligned(32) __packed;

#define AM65_CPTS_GENF_MAX_NUM 9
#define AM65_CPTS_ESTF_MAX_NUM 8

struct am65_cpts_regs {
	u32 idver;		/* Identification and version */
	u32 control;		/* Time sync control */
	u32 rftclk_sel;		/* Reference Clock Select Register */
	u32 ts_push;		/* Time stamp event push */
	u32 ts_load_val_lo;	/* Time Stamp Load Low Value 0:31 */
	u32 ts_load_en;		/* Time stamp load enable */
	u32 ts_comp_lo;		/* Time Stamp Comparison Low Value 0:31 */
	u32 ts_comp_length;	/* Time Stamp Comparison Length */
	u32 intstat_raw;	/* Time sync interrupt status raw */
	u32 intstat_masked;	/* Time sync interrupt status masked */
	u32 int_enable;		/* Time sync interrupt enable */
	u32 ts_comp_nudge;	/* Time Stamp Comparison Nudge Value */
	u32 event_pop;		/* Event interrupt pop */
	u32 event_0;		/* Event Time Stamp lo 0:31 */
	u32 event_1;		/* Event Type Fields */
	u32 event_2;		/* Event Type Fields domain */
	u32 event_3;		/* Event Time Stamp hi 32:63 */
	u32 ts_load_val_hi;	/* Time Stamp Load High Value 32:63 */
	u32 ts_comp_hi;		/* Time Stamp Comparison High Value 32:63 */
	u32 ts_add_val;		/* Time Stamp Add value */
	u32 ts_ppm_low;		/* Time Stamp PPM Load Low Value 0:31 */
	u32 ts_ppm_hi;		/* Time Stamp PPM Load High Value 32:63 */
	u32 ts_nudge;		/* Time Stamp Nudge value */
	u32 reserv[33];
	struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM];
	struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM];
};

/* CONTROL_REG */
#define AM65_CPTS_CONTROL_EN			BIT(0)
#define AM65_CPTS_CONTROL_INT_TEST		BIT(1)
#define AM65_CPTS_CONTROL_TS_COMP_POLARITY	BIT(2)
#define AM65_CPTS_CONTROL_TSTAMP_EN		BIT(3)
#define AM65_CPTS_CONTROL_SEQUENCE_EN		BIT(4)
#define AM65_CPTS_CONTROL_64MODE		BIT(5)
#define AM65_CPTS_CONTROL_TS_COMP_TOG		BIT(6)
#define AM65_CPTS_CONTROL_TS_PPM_DIR		BIT(7)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN	BIT(8)
#define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN	BIT(9)
#define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN	BIT(10)
#define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN	BIT(11)
#define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN	BIT(12)
#define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN	BIT(13)
#define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN	BIT(14)
#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN	BIT(15)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET	(8)

#define AM65_CPTS_CONTROL_TX_GENF_CLR_EN	BIT(17)

#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK	(0xF)
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT	(28)

/* RFTCLK_SEL_REG */
#define AM65_CPTS_RFTCLK_SEL_MASK		(0x1F)

/* TS_PUSH_REG */
#define AM65_CPTS_TS_PUSH			BIT(0)

/* TS_LOAD_EN_REG */
#define AM65_CPTS_TS_LOAD_EN			BIT(0)

/* INTSTAT_RAW_REG */
#define AM65_CPTS_INTSTAT_RAW_TS_PEND		BIT(0)

/* INTSTAT_MASKED_REG */
#define AM65_CPTS_INTSTAT_MASKED_TS_PEND	BIT(0)

/* INT_ENABLE_REG */
#define AM65_CPTS_INT_ENABLE_TS_PEND_EN		BIT(0)

/* TS_COMP_NUDGE_REG */
#define AM65_CPTS_TS_COMP_NUDGE_MASK		(0xFF)

/* EVENT_POP_REG */
#define AM65_CPTS_EVENT_POP			BIT(0)

/* EVENT_1_REG */
#define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK	GENMASK(15, 0)

#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK	GENMASK(19, 16)
#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT	(16)

#define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK	GENMASK(23, 20)
#define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT	(20)

#define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK	GENMASK(28, 24)
#define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT	(24)

/* EVENT_2_REG */
#define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK	(0xFF)
#define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT	(0)

enum {
	AM65_CPTS_EV_PUSH,	/* Time Stamp Push Event */
	AM65_CPTS_EV_ROLL,	/* Time Stamp Rollover Event */
	AM65_CPTS_EV_HALF,	/* Time Stamp Half Rollover Event */
	AM65_CPTS_EV_HW,	/* Hardware Time Stamp Push Event */
	AM65_CPTS_EV_RX,	/* Ethernet Receive Event */
	AM65_CPTS_EV_TX,	/* Ethernet Transmit Event */
	AM65_CPTS_EV_TS_COMP,	/* Time Stamp Compare Event */
	AM65_CPTS_EV_HOST,	/* Host Transmit Event */
};

struct am65_cpts_event {
	struct list_head list;
	unsigned long tmo;
	u32 event1;
	u32 event2;
	u64 timestamp;
};

#define AM65_CPTS_FIFO_DEPTH		(16)
#define AM65_CPTS_MAX_EVENTS		(32)
#define AM65_CPTS_EVENT_RX_TX_TIMEOUT	(20) /* ms */
#define AM65_CPTS_SKB_TX_WORK_TIMEOUT	1 /* jiffies */
#define AM65_CPTS_MIN_PPM		0x400

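/* The event pool holds twice the HW FIFO depth, presumably so that a full
 * FIFO can be drained even while previously popped RX/TX events are still
 * queued on the per-direction lists awaiting an skb match.
 */
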
struct am65_cpts {
	struct device *dev;
	struct am65_cpts_regs __iomem *reg;
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	int phc_index;
	struct clk_hw *clk_mux_hw;
	struct device_node *clk_mux_np;
	struct clk *refclk;
	u32 refclk_freq;
	/* separate lists to handle TX and RX timestamps independently */
	struct list_head events_tx;
	struct list_head events_rx;
	struct list_head pool;
	struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
	spinlock_t lock; /* protects the event lists */
	u32 ext_ts_inputs;
	u32 genf_num;
	u32 ts_add_val;
	int irq;
	struct mutex ptp_clk_lock; /* PHC access sync */
	u64 timestamp;
	u32 genf_enable;
	u32 hw_ts_enable;
	u32 estf_enable;
	struct sk_buff_head txq;
	bool pps_enabled;
	bool pps_present;
	u32 pps_hw_ts_idx;
	u32 pps_genf_idx;
	/* context save/restore */
	u64 sr_cpts_ns;
	u64 sr_ktime_ns;
	u32 sr_control;
	u32 sr_int_enable;
	u32 sr_rftclk_sel;
	u32 sr_ts_ppm_hi;
	u32 sr_ts_ppm_low;
	struct am65_genf_regs sr_genf[AM65_CPTS_GENF_MAX_NUM];
	struct am65_genf_regs sr_estf[AM65_CPTS_ESTF_MAX_NUM];
};

struct am65_cpts_skb_cb_data {
	unsigned long tmo;
	u32 skb_mtype_seqid;
};

#define am65_cpts_write32(c, v, r)	writel(v, &(c)->reg->r)
#define am65_cpts_read32(c, r)		readl(&(c)->reg->r)

static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
{
	u32 val;

	val = upper_32_bits(start_tstamp);
	am65_cpts_write32(cpts, val, ts_load_val_hi);
	val = lower_32_bits(start_tstamp);
	am65_cpts_write32(cpts, val, ts_load_val_lo);

	am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
}

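/* TS_ADD_VAL holds the per-tick increment, minus one, so that the counter
 * counts in nanoseconds (see also the GenF PPM note in adjfine, which uses
 * (ts_add_val + 1) ns as the refclk tick). Worked example (illustrative):
 * a 500 MHz refclk has a 2 ns period, so ts_add_val is
 * (1000000000 / 500000000) - 1 = 1, and each tick adds ts_add_val + 1 = 2 ns.
 */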
static void am65_cpts_set_add_val(struct am65_cpts *cpts)
{
	/* select coefficient according to the rate */
	cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;

	am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
}

static void am65_cpts_disable(struct am65_cpts *cpts)
{
	am65_cpts_write32(cpts, 0, control);
	am65_cpts_write32(cpts, 0, int_enable);
}

static int am65_cpts_purge_event_list(struct am65_cpts *cpts,
				      struct list_head *events)
{
	struct list_head *this, *next;
	struct am65_cpts_event *event;
	int removed = 0;

	list_for_each_safe(this, next, events) {
		event = list_entry(this, struct am65_cpts_event, list);
		if (time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			++removed;
		}
	}
	return removed;
}

static int am65_cpts_event_get_port(struct am65_cpts_event *event)
{
	return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
		AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
}

static int am65_cpts_event_get_type(struct am65_cpts_event *event)
{
	return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >>
		AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
}

static int am65_cpts_purge_events(struct am65_cpts *cpts)
{
	int removed = 0;

	removed += am65_cpts_purge_event_list(cpts, &cpts->events_tx);
	removed += am65_cpts_purge_event_list(cpts, &cpts->events_rx);

	if (removed)
		dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
	return removed ? 0 : -1;
}

/* Returns false when an event was popped from the FIFO, true when the FIFO
 * is empty.
 */
static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
				     struct am65_cpts_event *event)
{
	u32 r = am65_cpts_read32(cpts, intstat_raw);

	if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) {
		event->timestamp = am65_cpts_read32(cpts, event_0);
		event->event1 = am65_cpts_read32(cpts, event_1);
		event->event2 = am65_cpts_read32(cpts, event_2);
		event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
		am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
		return false;
	}
	return true;
}

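/* Drain the event FIFO; must be called with cpts->lock held. Push events
 * update the cached current time, RX/TX timestamp events are queued on the
 * per-direction lists for later matching against skbs, and HWx_TS_PUSH
 * events are reported to the PTP core as extts (or as PPS when the input
 * is the dedicated PPS one).
 */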
static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
{
	struct ptp_clock_event pevent;
	struct am65_cpts_event *event;
	bool schedule = false;
	int i, type, ret = 0;

	for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
		event = list_first_entry_or_null(&cpts->pool,
						 struct am65_cpts_event, list);

		if (!event) {
			if (am65_cpts_purge_events(cpts)) {
				dev_err(cpts->dev, "cpts: event pool empty\n");
				ret = -1;
				goto out;
			}
			continue;
		}

		if (am65_cpts_fifo_pop_event(cpts, event))
			break;

		type = am65_cpts_event_get_type(event);
		switch (type) {
		case AM65_CPTS_EV_PUSH:
			cpts->timestamp = event->timestamp;
			dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
				cpts->timestamp);
			break;
		case AM65_CPTS_EV_RX:
			event->tmo = jiffies +
				msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);

			list_move_tail(&event->list, &cpts->events_rx);

			dev_dbg(cpts->dev,
				"AM65_CPTS_EV_RX e1:%08x e2:%08x t:%lld\n",
				event->event1, event->event2,
				event->timestamp);
			break;
		case AM65_CPTS_EV_TX:
			event->tmo = jiffies +
				msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);

			list_move_tail(&event->list, &cpts->events_tx);

			dev_dbg(cpts->dev,
				"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
				event->event1, event->event2,
				event->timestamp);
			schedule = true;
			break;
		case AM65_CPTS_EV_HW:
			pevent.index = am65_cpts_event_get_port(event) - 1;
			pevent.timestamp = event->timestamp;
			if (cpts->pps_enabled && pevent.index == cpts->pps_hw_ts_idx) {
				pevent.type = PTP_CLOCK_PPSUSR;
				pevent.pps_times.ts_real = ns_to_timespec64(pevent.timestamp);
			} else {
				pevent.type = PTP_CLOCK_EXTTS;
			}
			dev_dbg(cpts->dev, "AM65_CPTS_EV_HW:%s p:%d t:%llu\n",
				pevent.type == PTP_CLOCK_EXTTS ?
				"extts" : "pps",
				pevent.index, event->timestamp);

			ptp_clock_event(cpts->ptp_clock, &pevent);
			break;
		case AM65_CPTS_EV_HOST:
			break;
		case AM65_CPTS_EV_ROLL:
		case AM65_CPTS_EV_HALF:
		case AM65_CPTS_EV_TS_COMP:
			dev_dbg(cpts->dev,
				"AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n",
				type,
				event->event1, event->event2,
				event->timestamp);
			break;
		default:
			dev_err(cpts->dev, "cpts: unknown event type\n");
			ret = -1;
			goto out;
		}
	}

out:
	if (schedule)
		ptp_schedule_worker(cpts->ptp_clock, 0);

	return ret;
}

static int am65_cpts_fifo_read(struct am65_cpts *cpts)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cpts->lock, flags);
	ret = __am65_cpts_fifo_read(cpts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	return ret;
}

static u64 am65_cpts_gettime(struct am65_cpts *cpts,
			     struct ptp_system_timestamp *sts)
{
	unsigned long flags;
	u64 val = 0;

	/* Temporarily disable the CPTS interrupt so the pushed timestamp
	 * event is consumed by the FIFO read below rather than by the IRQ
	 * handler, which would double-read the FIFO. An interrupt that is
	 * already in flight is OK.
	 */
	am65_cpts_write32(cpts, 0, int_enable);

	/* use spin_lock_irqsave() here as it has to run very fast */
	spin_lock_irqsave(&cpts->lock, flags);
	ptp_read_system_prets(sts);
	am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
	/* read back so the push has reached the hw before the post system
	 * timestamp is taken
	 */
	am65_cpts_read32(cpts, ts_push);
	ptp_read_system_postts(sts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	am65_cpts_fifo_read(cpts);

	am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);

	val = cpts->timestamp;

	return val;
}

static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
{
	struct am65_cpts *cpts = dev_id;

	if (am65_cpts_fifo_read(cpts))
		dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");

	return IRQ_HANDLED;
}

/* PTP clock operations */
static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	u32 estf_ctrl_val = 0, estf_ppm_hi = 0, estf_ppm_low = 0;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	int pps_index = cpts->pps_genf_idx;
	u64 adj_period, pps_adj_period;
	u32 ctrl_val, ppm_hi, ppm_low;
	unsigned long flags;
	int neg_adj = 0, i;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	/* base freq = 1GHz = 1 000 000 000
	 * ppb_norm = ppb * base_freq / clock_freq;
	 * ppm_norm = ppb_norm / 1000
	 * adj_period = 1 000 000 / ppm_norm
	 * adj_period = 1 000 000 000 / ppb_norm
	 * adj_period = 1 000 000 000 / (ppb * base_freq / clock_freq)
	 * adj_period = (1 000 000 000 * clock_freq) / (ppb * base_freq)
	 * adj_period = clock_freq / ppb
	 */
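	/* Worked example (illustrative): refclk_freq = 500 MHz and ppb = 100
	 * give adj_period = 500000000 / 100 = 5000000, i.e. one correction
	 * step every 5M refclk cycles.
	 */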
	adj_period = div_u64(cpts->refclk_freq, ppb);

	mutex_lock(&cpts->ptp_clk_lock);

	ctrl_val = am65_cpts_read32(cpts, control);
	if (neg_adj)
		ctrl_val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
	else
		ctrl_val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;

	ppm_hi = upper_32_bits(adj_period) & 0x3FF;
	ppm_low = lower_32_bits(adj_period);

	if (cpts->pps_enabled) {
		estf_ctrl_val = am65_cpts_read32(cpts, genf[pps_index].control);
		if (neg_adj)
			estf_ctrl_val &= ~BIT(1);
		else
			estf_ctrl_val |= BIT(1);

		/* GenF PPM corrections are applied in CPTS refclk ticks of
		 * (cpts->ts_add_val + 1) ns, so the GenF length PPM adj
		 * period needs to be scaled accordingly.
		 */
		pps_adj_period = adj_period * (cpts->ts_add_val + 1);
		estf_ppm_hi = upper_32_bits(pps_adj_period) & 0x3FF;
		estf_ppm_low = lower_32_bits(pps_adj_period);
	}

	spin_lock_irqsave(&cpts->lock, flags);

	/* All writes below must be done as quickly as possible:
	 * - a delay between the PPM direction and PPM value updates can
	 *   cause an error, as the old PPM correction would briefly be
	 *   applied in the wrong direction
	 * - a delay between the CPTS-clock PPM cfg and the GenF PPM cfg can
	 *   cause an error, as the CPTS-clock PPM would run with the new
	 *   cfg while the GenF PPM still uses the old one for a short
	 *   period of time
	 */

	am65_cpts_write32(cpts, ctrl_val, control);
	am65_cpts_write32(cpts, ppm_hi, ts_ppm_hi);
	am65_cpts_write32(cpts, ppm_low, ts_ppm_low);

	if (cpts->pps_enabled) {
		am65_cpts_write32(cpts, estf_ctrl_val, genf[pps_index].control);
		am65_cpts_write32(cpts, estf_ppm_hi, genf[pps_index].ppm_hi);
		am65_cpts_write32(cpts, estf_ppm_low, genf[pps_index].ppm_low);
	}

	for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) {
		if (cpts->estf_enable & BIT(i)) {
			am65_cpts_write32(cpts, estf_ctrl_val, estf[i].control);
			am65_cpts_write32(cpts, estf_ppm_hi, estf[i].ppm_hi);
			am65_cpts_write32(cpts, estf_ppm_low, estf[i].ppm_low);
		}
	}
	/* All GenF/EstF can be updated here the same way */
	spin_unlock_irqrestore(&cpts->lock, flags);

	mutex_unlock(&cpts->ptp_clk_lock);

	return 0;
}

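/* adjtime is a read-modify-write of the running counter under ptp_clk_lock:
 * the clock keeps ticking between the read-out and the write-back, so each
 * call introduces a small extra offset on the order of the get/set latency.
 */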
static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	s64 ns;

	mutex_lock(&cpts->ptp_clk_lock);
	ns = am65_cpts_gettime(cpts, NULL);
	ns += delta;
	am65_cpts_settime(cpts, ns);
	mutex_unlock(&cpts->ptp_clk_lock);

	return 0;
}

static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	u64 ns;

	mutex_lock(&cpts->ptp_clk_lock);
	ns = am65_cpts_gettime(cpts, sts);
	mutex_unlock(&cpts->ptp_clk_lock);
	*ts = ns_to_timespec64(ns);

	return 0;
}

u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
{
	u64 ns;

	/* reuse ptp_clk_lock as it serializes the ts push */
	mutex_lock(&cpts->ptp_clk_lock);
	ns = am65_cpts_gettime(cpts, NULL);
	mutex_unlock(&cpts->ptp_clk_lock);

	return ns;
}
EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime);

static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	u64 ns;

	ns = timespec64_to_ns(ts);
	mutex_lock(&cpts->ptp_clk_lock);
	am65_cpts_settime(cpts, ns);
	mutex_unlock(&cpts->ptp_clk_lock);

	return 0;
}

static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
{
	u32 v;

	v = am65_cpts_read32(cpts, control);
	if (on) {
		v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
		cpts->hw_ts_enable |= BIT(index);
	} else {
		v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
		cpts->hw_ts_enable &= ~BIT(index);
	}
	am65_cpts_write32(cpts, v, control);
}

static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
{
	if (index >= cpts->ptp_info.n_ext_ts)
		return -ENXIO;

	if (cpts->pps_present && index == cpts->pps_hw_ts_idx)
		return -EINVAL;

	if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
		return 0;

	mutex_lock(&cpts->ptp_clk_lock);
	am65_cpts_extts_enable_hw(cpts, index, on);
	mutex_unlock(&cpts->ptp_clk_lock);

	dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
		__func__, index, on ? "enabled" : "disabled");

	return 0;
}

int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
			  struct am65_cpts_estf_cfg *cfg)
{
	u64 cycles;
	u32 val;

	cycles = cfg->ns_period * cpts->refclk_freq;
	cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC);
	if (cycles > U32_MAX)
		return -EINVAL;

	/* according to TRM should be zeroed */
	am65_cpts_write32(cpts, 0, estf[idx].length);

	val = upper_32_bits(cfg->ns_start);
	am65_cpts_write32(cpts, val, estf[idx].comp_hi);
	val = lower_32_bits(cfg->ns_start);
	am65_cpts_write32(cpts, val, estf[idx].comp_lo);
	val = lower_32_bits(cycles);
	am65_cpts_write32(cpts, val, estf[idx].length);
	am65_cpts_write32(cpts, 0, estf[idx].control);
	am65_cpts_write32(cpts, 0, estf[idx].ppm_hi);
	am65_cpts_write32(cpts, 0, estf[idx].ppm_low);

	cpts->estf_enable |= BIT(idx);

	dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);

	return 0;
}
EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);

void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
{
	am65_cpts_write32(cpts, 0, estf[idx].length);
	cpts->estf_enable &= ~BIT(idx);

	dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
}
EXPORT_SYMBOL_GPL(am65_cpts_estf_disable);

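/* Program a GenF function for periodic output: comp_hi/comp_lo hold the
 * absolute start time in ns, length holds the period in refclk cycles
 * (ns_period * refclk_freq / NSEC_PER_SEC), and writing a zero length (the
 * disable path below) stops the generator.
 */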
static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
				       struct ptp_perout_request *req, int on)
{
	u64 ns_period, ns_start, cycles;
	struct timespec64 ts;
	u32 val;

	if (on) {
		ts.tv_sec = req->period.sec;
		ts.tv_nsec = req->period.nsec;
		ns_period = timespec64_to_ns(&ts);

		cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;

		ts.tv_sec = req->start.sec;
		ts.tv_nsec = req->start.nsec;
		ns_start = timespec64_to_ns(&ts);

		val = upper_32_bits(ns_start);
		am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
		val = lower_32_bits(ns_start);
		am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
		val = lower_32_bits(cycles);
		am65_cpts_write32(cpts, val, genf[req->index].length);

		am65_cpts_write32(cpts, 0, genf[req->index].control);
		am65_cpts_write32(cpts, 0, genf[req->index].ppm_hi);
		am65_cpts_write32(cpts, 0, genf[req->index].ppm_low);

		cpts->genf_enable |= BIT(req->index);
	} else {
		am65_cpts_write32(cpts, 0, genf[req->index].length);

		cpts->genf_enable &= ~BIT(req->index);
	}
}

static int am65_cpts_perout_enable(struct am65_cpts *cpts,
				   struct ptp_perout_request *req, int on)
{
	if (req->index >= cpts->ptp_info.n_per_out)
		return -ENXIO;

	if (cpts->pps_present && req->index == cpts->pps_genf_idx)
		return -EINVAL;

	if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
		return 0;

	mutex_lock(&cpts->ptp_clk_lock);
	am65_cpts_perout_enable_hw(cpts, req, on);
	mutex_unlock(&cpts->ptp_clk_lock);

	dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
		__func__, req->index, on ? "enabled" : "disabled");

	return 0;
}

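/* PPS is emulated by pairing one HWx_TS_PUSH input with one GenF output;
 * the "ti,pps" DT property selects the pair, and on real boards the two
 * are typically wired together. The GenF produces a 1 Hz signal aligned
 * to the PHC, each edge is timestamped via the HW push event, and the
 * event handler reports it as PTP_CLOCK_PPSUSR.
 */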
static int am65_cpts_pps_enable(struct am65_cpts *cpts, int on)
{
	int ret = 0;
	struct timespec64 ts;
	struct ptp_clock_request rq;
	u64 ns;

	if (!cpts->pps_present)
		return -EINVAL;

	if (cpts->pps_enabled == !!on)
		return 0;

	mutex_lock(&cpts->ptp_clk_lock);

	if (on) {
		am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);

		ns = am65_cpts_gettime(cpts, NULL);
		ts = ns_to_timespec64(ns);
		rq.perout.period.sec = 1;
		rq.perout.period.nsec = 0;
		rq.perout.start.sec = ts.tv_sec + 2;
		rq.perout.start.nsec = 0;
		rq.perout.index = cpts->pps_genf_idx;

		am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
		cpts->pps_enabled = true;
	} else {
		rq.perout.index = cpts->pps_genf_idx;
		am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
		am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);
		cpts->pps_enabled = false;
	}

	mutex_unlock(&cpts->ptp_clk_lock);

	dev_dbg(cpts->dev, "%s: pps: %s\n",
		__func__, on ? "enabled" : "disabled");
	return ret;
}

static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq, int on)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return am65_cpts_extts_enable(cpts, rq->extts.index, on);
	case PTP_CLK_REQ_PEROUT:
		return am65_cpts_perout_enable(cpts, &rq->perout, on);
	case PTP_CLK_REQ_PPS:
		return am65_cpts_pps_enable(cpts, on);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static long am65_cpts_ts_work(struct ptp_clock_info *ptp);

static struct ptp_clock_info am65_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "CPTS timer",
	.adjfine	= am65_cpts_ptp_adjfine,
	.adjtime	= am65_cpts_ptp_adjtime,
	.gettimex64	= am65_cpts_ptp_gettimex,
	.settime64	= am65_cpts_ptp_settime,
	.enable		= am65_cpts_ptp_enable,
	.do_aux_work	= am65_cpts_ts_work,
};

static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
				  struct am65_cpts_event *event)
{
	struct sk_buff_head txq_list;
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	bool found = false;
	u32 mtype_seqid;

	mtype_seqid = event->event1 &
		      (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
		       AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
		       AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);

	__skb_queue_head_init(&txq_list);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice_init(&cpts->txq, &txq_list);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	/* no need to grab txq.lock as access is always done under cpts->lock */
	skb_queue_walk_safe(&txq_list, skb, tmp) {
		struct skb_shared_hwtstamps ssh;
		struct am65_cpts_skb_cb_data *skb_cb =
					(struct am65_cpts_skb_cb_data *)skb->cb;

		if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
		    ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
		     (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
			mtype_seqid = skb_cb->skb_mtype_seqid;

		if (mtype_seqid == skb_cb->skb_mtype_seqid) {
			u64 ns = event->timestamp;

			memset(&ssh, 0, sizeof(ssh));
			ssh.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ssh);
			found = true;
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
			dev_dbg(cpts->dev,
				"match tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			break;
		}

		if (time_after(jiffies, skb_cb->tmo)) {
			/* timeout any expired skbs over 100 ms */
			dev_dbg(cpts->dev,
				"expiring tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
		}
	}

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice(&txq_list, &cpts->txq);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return found;
}

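/* Match queued TX events against queued skbs. The TX event list is spliced
 * out under cpts->lock so matching (which walks the skb queue) can run
 * without holding the event lock; unmatched, unexpired events are spliced
 * back afterwards and matched or expired ones are returned to the pool.
 */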
static void am65_cpts_find_tx_ts(struct am65_cpts *cpts)
{
	struct am65_cpts_event *event;
	struct list_head *this, *next;
	LIST_HEAD(events_free);
	unsigned long flags;
	LIST_HEAD(events);

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_init(&cpts->events_tx, &events);
	spin_unlock_irqrestore(&cpts->lock, flags);

	list_for_each_safe(this, next, &events) {
		event = list_entry(this, struct am65_cpts_event, list);
		if (am65_cpts_match_tx_ts(cpts, event) ||
		    time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &events_free);
		}
	}

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_tail(&events, &cpts->events_tx);
	list_splice_tail(&events_free, &cpts->pool);
	spin_unlock_irqrestore(&cpts->lock, flags);
}

static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
{
	struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
	unsigned long flags;
	long delay = -1;

	am65_cpts_find_tx_ts(cpts);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	if (!skb_queue_empty(&cpts->txq))
		delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT;
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return delay;
}

/* Returns 1 and fills @mtype_seqid when @skb is a PTP packet, 0 otherwise */
static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
	unsigned int ptp_class = ptp_classify_raw(skb);
	struct ptp_header *hdr;
	u8 msgtype;
	u16 seqid;

	if (ptp_class == PTP_CLASS_NONE)
		return 0;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return 0;

	msgtype = ptp_get_msgtype(hdr, ptp_class);
	seqid = ntohs(hdr->sequence_id);

	*mtype_seqid = (msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
			AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
	*mtype_seqid |= (seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);

	return 1;
}

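/* The match key mirrors the EVENT_1 register layout: sequence id in bits
 * 15:0, message type in bits 19:16, and (once the caller ORs in
 * AM65_CPTS_EV_RX/AM65_CPTS_EV_TX) the event type in bits 23:20.
 */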
static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
{
	struct list_head *this, *next;
	struct am65_cpts_event *event;
	unsigned long flags;
	u32 mtype_seqid;
	u64 ns = 0;

	spin_lock_irqsave(&cpts->lock, flags);
	__am65_cpts_fifo_read(cpts);
	list_for_each_safe(this, next, &cpts->events_rx) {
		event = list_entry(this, struct am65_cpts_event, list);
		if (time_after(jiffies, event->tmo)) {
			list_move(&event->list, &cpts->pool);
			continue;
		}

		mtype_seqid = event->event1 &
			      (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
			       AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK |
			       AM65_CPTS_EVENT_1_EVENT_TYPE_MASK);

		if (mtype_seqid == skb_mtype_seqid) {
			ns = event->timestamp;
			list_move(&event->list, &cpts->pool);
			break;
		}
	}
	spin_unlock_irqrestore(&cpts->lock, flags);

	return ns;
}

void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
	struct am65_cpts_skb_cb_data *skb_cb = (struct am65_cpts_skb_cb_data *)skb->cb;
	struct skb_shared_hwtstamps *ssh;
	int ret;
	u64 ns;

	/* am65_cpts_rx_timestamp() is called before eth_type_trans(), so
	 * the skb MAC header properties are not configured yet and the MAC
	 * header has to be reset here.
	 */
	skb_reset_mac_header(skb);
	ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return; /* if not PTP class packet */

	skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_RX << AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n", __func__, skb_cb->skb_mtype_seqid);

	ns = am65_cpts_find_rx_ts(cpts, skb_cb->skb_mtype_seqid);
	if (!ns)
		return;

	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL_GPL(am65_cpts_rx_timestamp);

/**
 * am65_cpts_tx_timestamp - save tx packet for timestamping
 * @cpts: cpts handle
 * @skb: packet
 *
 * This function saves the tx packet for timestamping if the packet can be
 * timestamped. The actual processing is done later by the PTP auxiliary
 * worker.
 */
void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
	struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	/* add frame to queue for processing later.
	 * The periodic FIFO check will handle this.
	 */
	skb_get(skb);
	/* get the timestamp for timeouts */
	skb_cb->tmo = jiffies + msecs_to_jiffies(100);
	skb_queue_tail(&cpts->txq, skb);
	ptp_schedule_worker(cpts->ptp_clock, 0);
}
EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);

/**
 * am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
 * @cpts: cpts handle
 * @skb: packet
 *
 * This function should be called from .xmit().
 * It checks if the packet can be timestamped, fills the internal cpts data
 * in skb->cb and marks the packet as SKBTX_IN_PROGRESS.
 */
void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
{
	struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
	int ret;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;
	skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
				   AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp);

int am65_cpts_phc_index(struct am65_cpts *cpts)
{
	return cpts->phc_index;
}
EXPORT_SYMBOL_GPL(am65_cpts_phc_index);

static void cpts_free_clk_mux(void *data)
{
	struct am65_cpts *cpts = data;

	of_clk_del_provider(cpts->clk_mux_np);
	clk_hw_unregister_mux(cpts->clk_mux_hw);
	of_node_put(cpts->clk_mux_np);
}

static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
				 struct device_node *node)
{
	unsigned int num_parents;
	const char **parent_names;
	char *clk_mux_name;
	void __iomem *reg;
	int ret = -EINVAL;

	cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
	if (!cpts->clk_mux_np)
		return 0;

	num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
	if (num_parents < 1) {
		dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
			cpts->clk_mux_np);
		goto mux_fail;
	}

	parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents,
				    GFP_KERNEL);
	if (!parent_names) {
		ret = -ENOMEM;
		goto mux_fail;
	}

	of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);

	clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
				      dev_name(cpts->dev), cpts->clk_mux_np);
	if (!clk_mux_name) {
		ret = -ENOMEM;
		goto mux_fail;
	}

	reg = &cpts->reg->rftclk_sel;
	/* dev must be NULL to avoid recursive incrementing
	 * of module refcnt
	 */
	cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
					       parent_names, num_parents,
					       0, reg, 0, 5, 0, NULL);
	if (IS_ERR(cpts->clk_mux_hw)) {
		ret = PTR_ERR(cpts->clk_mux_hw);
		goto mux_fail;
	}

	ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
				     cpts->clk_mux_hw);
	if (ret)
		goto clk_hw_register;

	ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
	if (ret)
		dev_err(cpts->dev, "failed to add clkmux reset action %d", ret);

	return ret;

clk_hw_register:
	clk_hw_unregister_mux(cpts->clk_mux_hw);
mux_fail:
	of_node_put(cpts->clk_mux_np);
	return ret;
}

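/* Parse the optional CPTS DT properties: "ti,cpts-ext-ts-inputs" (number
 * of HWx_TS_PUSH inputs), "ti,cpts-periodic-outputs" (number of GenF
 * outputs) and "ti,pps" (the HWx_TS_PUSH index, 0..7, and GENFy index,
 * 0..1, pair used for PPS), then set up the optional "refclk-mux" child.
 */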
static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
{
	u32 prop[2];

	if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
		cpts->ext_ts_inputs = prop[0];

	if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
		cpts->genf_num = prop[0];

	if (!of_property_read_u32_array(node, "ti,pps", prop, 2)) {
		cpts->pps_present = true;

		if (prop[0] > 7) {
			dev_err(cpts->dev, "invalid HWx_TS_PUSH index: %u provided\n", prop[0]);
			cpts->pps_present = false;
		}
		if (prop[1] > 1) {
			dev_err(cpts->dev, "invalid GENFy index: %u provided\n", prop[1]);
			cpts->pps_present = false;
		}
		if (cpts->pps_present) {
			cpts->pps_hw_ts_idx = prop[0];
			cpts->pps_genf_idx = prop[1];
		}
	}

	return cpts_of_mux_clk_setup(cpts, node);
}

void am65_cpts_release(struct am65_cpts *cpts)
{
	ptp_clock_unregister(cpts->ptp_clock);
	am65_cpts_disable(cpts);
	clk_disable_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(am65_cpts_release);

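/**
 * am65_cpts_create - create and register a CPTS instance
 * @cpts: not used; see below
 * @dev: CPTS device
 * @regs: ioremapped CPTS register range
 * @node: device node carrying the CPTS properties
 *
 * Typical usage from an ethernet driver (illustrative sketch):
 *
 *	cpts = am65_cpts_create(dev, base, node);
 *	if (IS_ERR(cpts))
 *		return PTR_ERR(cpts);
 *
 * Returns the CPTS handle on success or an ERR_PTR() on failure.
 */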
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
				   struct device_node *node)
{
	struct am65_cpts *cpts;
	int ret, i;

	cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
	if (!cpts)
		return ERR_PTR(-ENOMEM);

	cpts->dev = dev;
	cpts->reg = (struct am65_cpts_regs __iomem *)regs;

	cpts->irq = of_irq_get_byname(node, "cpts");
	if (cpts->irq <= 0) {
		ret = cpts->irq ?: -ENXIO;
		dev_err_probe(dev, ret, "Failed to get IRQ number\n");
		return ERR_PTR(ret);
	}

	ret = am65_cpts_of_parse(cpts, node);
	if (ret)
		return ERR_PTR(ret);

	mutex_init(&cpts->ptp_clk_lock);
	INIT_LIST_HEAD(&cpts->events_tx);
	INIT_LIST_HEAD(&cpts->events_rx);
	INIT_LIST_HEAD(&cpts->pool);
	spin_lock_init(&cpts->lock);
	skb_queue_head_init(&cpts->txq);

	for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++)
		list_add(&cpts->pool_data[i].list, &cpts->pool);

	cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
	if (IS_ERR(cpts->refclk)) {
		ret = PTR_ERR(cpts->refclk);
		dev_err_probe(dev, ret, "Failed to get refclk\n");
		return ERR_PTR(ret);
	}

	ret = clk_prepare_enable(cpts->refclk);
	if (ret) {
		dev_err(dev, "Failed to enable refclk %d\n", ret);
		return ERR_PTR(ret);
	}

	cpts->refclk_freq = clk_get_rate(cpts->refclk);

	am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
	cpts->ptp_info = am65_ptp_info;

	if (cpts->ext_ts_inputs)
		cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
	if (cpts->genf_num)
		cpts->ptp_info.n_per_out = cpts->genf_num;
	if (cpts->pps_present)
		cpts->ptp_info.pps = 1;

	am65_cpts_set_add_val(cpts);

	am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN |
			  AM65_CPTS_CONTROL_64MODE |
			  AM65_CPTS_CONTROL_TX_GENF_CLR_EN,
			  control);
	am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);

	/* set time to the current system time */
	am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));

	cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
	if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
		dev_err(dev, "Failed to register ptp clk %ld\n",
			PTR_ERR(cpts->ptp_clock));
		ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
		goto refclk_disable;
	}
	cpts->phc_index = ptp_clock_index(cpts->ptp_clock);

	ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
					am65_cpts_interrupt,
					IRQF_ONESHOT, dev_name(dev), cpts);
	if (ret < 0) {
		dev_err(cpts->dev, "error attaching irq %d\n", ret);
		goto reset_ptpclk;
	}

	dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u pps:%d\n",
		 am65_cpts_read32(cpts, idver),
		 cpts->refclk_freq, cpts->ts_add_val, cpts->pps_present);

	return cpts;

reset_ptpclk:
	/* am65_cpts_release() already disables and unprepares refclk, so
	 * don't fall through to refclk_disable and disable it twice.
	 */
	am65_cpts_release(cpts);
	return ERR_PTR(ret);
refclk_disable:
	clk_disable_unprepare(cpts->refclk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(am65_cpts_create);

void am65_cpts_suspend(struct am65_cpts *cpts)
{
	/* save state and disable CPTS */
	cpts->sr_control = am65_cpts_read32(cpts, control);
	cpts->sr_int_enable = am65_cpts_read32(cpts, int_enable);
	cpts->sr_rftclk_sel = am65_cpts_read32(cpts, rftclk_sel);
	cpts->sr_ts_ppm_hi = am65_cpts_read32(cpts, ts_ppm_hi);
	cpts->sr_ts_ppm_low = am65_cpts_read32(cpts, ts_ppm_low);
	cpts->sr_cpts_ns = am65_cpts_gettime(cpts, NULL);
	cpts->sr_ktime_ns = ktime_to_ns(ktime_get_real());
	am65_cpts_disable(cpts);
	clk_disable(cpts->refclk);

	/* Save GENF state */
	memcpy_fromio(&cpts->sr_genf, &cpts->reg->genf, sizeof(cpts->sr_genf));

	/* Save ESTF state */
	memcpy_fromio(&cpts->sr_estf, &cpts->reg->estf, sizeof(cpts->sr_estf));
}
EXPORT_SYMBOL_GPL(am65_cpts_suspend);

void am65_cpts_resume(struct am65_cpts *cpts)
{
	int i;
	s64 ktime_ns;

	/* restore state and enable CPTS */
	clk_enable(cpts->refclk);
	am65_cpts_write32(cpts, cpts->sr_rftclk_sel, rftclk_sel);
	am65_cpts_set_add_val(cpts);
	am65_cpts_write32(cpts, cpts->sr_control, control);
	am65_cpts_write32(cpts, cpts->sr_int_enable, int_enable);

	/* Restore time to saved CPTS time + time in suspend/resume */
	ktime_ns = ktime_to_ns(ktime_get_real());
	ktime_ns -= cpts->sr_ktime_ns;
	am65_cpts_settime(cpts, cpts->sr_cpts_ns + ktime_ns);

	/* Restore compensation (PPM) */
	am65_cpts_write32(cpts, cpts->sr_ts_ppm_hi, ts_ppm_hi);
	am65_cpts_write32(cpts, cpts->sr_ts_ppm_low, ts_ppm_low);

	/* Restore GENF state */
	for (i = 0; i < AM65_CPTS_GENF_MAX_NUM; i++) {
		am65_cpts_write32(cpts, 0, genf[i].length);	/* TRM sequence */
		am65_cpts_write32(cpts, cpts->sr_genf[i].comp_hi, genf[i].comp_hi);
		am65_cpts_write32(cpts, cpts->sr_genf[i].comp_lo, genf[i].comp_lo);
		am65_cpts_write32(cpts, cpts->sr_genf[i].length, genf[i].length);
		am65_cpts_write32(cpts, cpts->sr_genf[i].control, genf[i].control);
		am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_hi, genf[i].ppm_hi);
		am65_cpts_write32(cpts, cpts->sr_genf[i].ppm_low, genf[i].ppm_low);
	}

	/* Restore ESTF state */
	for (i = 0; i < AM65_CPTS_ESTF_MAX_NUM; i++) {
		am65_cpts_write32(cpts, 0, estf[i].length);	/* TRM sequence */
		am65_cpts_write32(cpts, cpts->sr_estf[i].comp_hi, estf[i].comp_hi);
		am65_cpts_write32(cpts, cpts->sr_estf[i].comp_lo, estf[i].comp_lo);
		am65_cpts_write32(cpts, cpts->sr_estf[i].length, estf[i].length);
		am65_cpts_write32(cpts, cpts->sr_estf[i].control, estf[i].control);
		am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_hi, estf[i].ppm_hi);
		am65_cpts_write32(cpts, cpts->sr_estf[i].ppm_low, estf[i].ppm_low);
	}
}
EXPORT_SYMBOL_GPL(am65_cpts_resume);

static int am65_cpts_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct am65_cpts *cpts;
	void __iomem *base;

	base = devm_platform_ioremap_resource_byname(pdev, "cpts");
	if (IS_ERR(base))
		return PTR_ERR(base);

	cpts = am65_cpts_create(dev, base, node);
	return PTR_ERR_OR_ZERO(cpts);
}

static const struct of_device_id am65_cpts_of_match[] = {
	{ .compatible = "ti,am65-cpts", },
	{ .compatible = "ti,j721e-cpts", },
	{},
};
MODULE_DEVICE_TABLE(of, am65_cpts_of_match);

static struct platform_driver am65_cpts_driver = {
	.probe		= am65_cpts_probe,
	.driver		= {
		.name	= "am65-cpts",
		.of_match_table = am65_cpts_of_match,
	},
};
module_platform_driver(am65_cpts_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
MODULE_DESCRIPTION("TI K3 AM65 CPTS driver");