1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <linux/log2.h>
36 #include <linux/ptp_clock_kernel.h>
37 #include <rdma/mlx5-abi.h>
38 #include "lib/eq.h"
39 #include "en.h"
40 #include "clock.h"
41 #ifdef CONFIG_X86
42 #include <linux/timekeeping.h>
43 #include <linux/cpufeature.h>
44 #endif /* CONFIG_X86 */
45
/* Size in bytes of the MRTCQ register's rt_clock_identity field. */
#define MLX5_RT_CLOCK_IDENTITY_SIZE MLX5_FLD_SZ_BYTES(mrtcq_reg, rt_clock_identity)

/* MTPPS pin_mode values: direction of a programmable PPS pin. */
enum {
	MLX5_PIN_MODE_IN = 0x0,
	MLX5_PIN_MODE_OUT = 0x1,
};

/* MTPPS output pattern for a pin configured as an output. */
enum {
	MLX5_OUT_PATTERN_PULSE = 0x0,
	MLX5_OUT_PATTERN_PERIODIC = 0x1,
};

/* MTPPSE pin-event generation mode (spelling kept as used by callers). */
enum {
	MLX5_EVENT_MODE_DISABLE = 0x0,
	MLX5_EVENT_MODE_REPETETIVE = 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
};
63
/* field_select bits for the MTPPS register: which fields a write updates. */
enum {
	MLX5_MTPPS_FS_ENABLE = BIT(0x0),
	MLX5_MTPPS_FS_PATTERN = BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
	MLX5_MTPPS_FS_NPPS_PERIOD = BIT(0x9),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS = BIT(0xa),
};

/* Accepted range (ns) for an MTUTC ADJUST_TIME operation; the extended
 * range applies when the mtutc_time_adjustment_extended_range MCAM
 * feature is present.
 */
enum {
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN = S16_MIN,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX = S16_MAX,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
81
/* Per-device clock state: the owning device, its devcom component link,
 * the PPS event notifier block and the deferred PPS-out work item.
 */
struct mlx5_clock_dev_state {
	struct mlx5_core_dev *mdev;
	struct mlx5_devcom_comp_dev *compdev;
	struct mlx5_nb pps_nb;
	struct work_struct out_work;
};

/* Private wrapper around the (possibly shared) mlx5_clock. 'lock'
 * serializes access to 'mdev' from PTP callbacks when the clock is
 * shared between devices; 'event_mdev' records which device armed a
 * pin event.
 */
struct mlx5_clock_priv {
	struct mlx5_clock clock;
	struct mlx5_core_dev *mdev;
	struct mutex lock; /* protect mdev and used in PTP callbacks */
	struct mlx5_core_dev *event_mdev;
};
95
/* Recover the enclosing mlx5_clock_priv from its embedded clock member. */
static struct mlx5_clock_priv *clock_priv(struct mlx5_clock *clock)
{
	return container_of(clock, struct mlx5_clock_priv, clock);
}
100
mlx5_clock_lockdep_assert(struct mlx5_clock * clock)101 static void mlx5_clock_lockdep_assert(struct mlx5_clock *clock)
102 {
103 if (!clock->shared)
104 return;
105
106 lockdep_assert(lockdep_is_held(&clock_priv(clock)->lock));
107 }
108
/* Return the device currently backing this clock. For a shared clock the
 * caller must hold the clock-priv mutex (checked via lockdep).
 */
static struct mlx5_core_dev *mlx5_clock_mdev_get(struct mlx5_clock *clock)
{
	mlx5_clock_lockdep_assert(clock);

	return clock_priv(clock)->mdev;
}
115
mlx5_clock_lock(struct mlx5_clock * clock)116 static void mlx5_clock_lock(struct mlx5_clock *clock)
117 {
118 if (!clock->shared)
119 return;
120
121 mutex_lock(&clock_priv(clock)->lock);
122 }
123
mlx5_clock_unlock(struct mlx5_clock * clock)124 static void mlx5_clock_unlock(struct mlx5_clock *clock)
125 {
126 if (!clock->shared)
127 return;
128
129 mutex_unlock(&clock_priv(clock)->lock);
130 }
131
mlx5_real_time_mode(struct mlx5_core_dev * mdev)132 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
133 {
134 return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
135 }
136
mlx5_npps_real_time_supported(struct mlx5_core_dev * mdev)137 static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
138 {
139 return (mlx5_real_time_mode(mdev) &&
140 MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
141 MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
142 }
143
/* True when FW allows modifying the cycles-to-real-time mapping (MTUTC). */
static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
148
/* Query the MRTCQ register and copy the device's real-time clock
 * identity into @identify. Returns 0 on success or the access_reg error.
 */
static int mlx5_clock_identity_get(struct mlx5_core_dev *mdev,
				   u8 identify[MLX5_RT_CLOCK_IDENTITY_SIZE])
{
	u32 in[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(mrtcq_reg)] = {};
	int err;

	err = mlx5_core_access_reg(mdev, in, sizeof(in),
				   out, sizeof(out), MLX5_REG_MRTCQ, 0, 0);
	if (err)
		return err;

	memcpy(identify, MLX5_ADDR_OF(mrtcq_reg, out, rt_clock_identity),
	       MLX5_RT_CLOCK_IDENTITY_SIZE);

	return 0;
}
164
/* Derive the cyclecounter shift for a device clock of @dev_freq_khz,
 * clamped (second min() term) so the resulting multiplier fits in 32 bits.
 */
static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
	/* Optimal shift constant leads to corrections above just 1 scaled ppm.
	 *
	 * Two sets of equations are needed to derive the optimal shift
	 * constant for the cyclecounter.
	 *
	 *    dev_freq_khz * 1000 / 2^shift_constant = 1 scaled_ppm
	 *    ppb = scaled_ppm * 1000 / 2^16
	 *
	 * Using the two equations together
	 *
	 *    dev_freq_khz * 1000 / 1 scaled_ppm = 2^shift_constant
	 *    dev_freq_khz * 2^16 / 1 ppb = 2^shift_constant
	 *    dev_freq_khz = 2^(shift_constant - 16)
	 *
	 * then yields
	 *
	 *    shift_constant = ilog2(dev_freq_khz) + 16
	 */

	return min(ilog2(dev_freq_khz) + 16,
		   ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
}
189
mlx5_clock_getmaxphase(struct mlx5_core_dev * mdev)190 static s32 mlx5_clock_getmaxphase(struct mlx5_core_dev *mdev)
191 {
192 return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
193 MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
194 MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
195 }
196
/* PTP .getmaxphase callback: report the HW phase-adjust limit, taking
 * the clock lock to safely resolve the backing device.
 */
static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev;
	s32 ret;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);
	ret = mlx5_clock_getmaxphase(mdev);
	mlx5_clock_unlock(clock);

	return ret;
}
210
mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev * mdev,s64 delta)211 static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
212 {
213 s64 max = mlx5_clock_getmaxphase(mdev);
214
215 if (delta < -max || delta > max)
216 return false;
217
218 return true;
219 }
220
/* Write the MTUTC register (set/adjust UTC time or frequency).
 * Returns -EOPNOTSUPP when the register is not advertised in MCAM.
 */
static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
		return -EOPNOTSUPP;

	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
				    MLX5_REG_MTUTC, 0, 1);
}
231
232 #ifdef CONFIG_X86
/* Check via the MTPTM register whether PCIe PTM source time is currently
 * available (psta bit). Returns false when the register is unsupported
 * or the query fails.
 */
static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
	u32 in[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
	int err;

	if (!MLX5_CAP_MCAM_REG3(dev, mtptm))
		return false;

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTPTM,
				   0, 0);
	if (err)
		return false;

	return !!MLX5_GET(mtptm_reg, out, psta);
}
249
/* Issue an MTCTR access to sample the host PTM root clock and the device
 * clock together. @real_time_mode selects the device real-time clock,
 * otherwise the free-running counter is sampled.
 * Returns 0 on success, -EINVAL if either sample is flagged invalid.
 */
static int mlx5_mtctr_read(struct mlx5_core_dev *mdev,
			   bool real_time_mode,
			   struct system_counterval_t *sys_counterval,
			   u64 *device)
{
	u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
	u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
	u64 host;
	int err;

	/* First timestamp: host ART via PTM; second: the device clock. */
	MLX5_SET(mtctr_reg, in, first_clock_timestamp_request,
		 MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK);
	MLX5_SET(mtctr_reg, in, second_clock_timestamp_request,
		 real_time_mode ? MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK :
		 MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);

	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MTCTR, 0, 0);
	if (err)
		return err;

	if (!MLX5_GET(mtctr_reg, out, first_clock_valid) ||
	    !MLX5_GET(mtctr_reg, out, second_clock_valid))
		return -EINVAL;

	host = MLX5_GET64(mtctr_reg, out, first_clock_timestamp);
	/* The ART value is reported in nanoseconds, hence use_nsecs. */
	*sys_counterval = (struct system_counterval_t) {
			.cycles = host,
			.cs_id = CSID_X86_ART,
			.use_nsecs = true,
	};
	*device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);

	return 0;
}
285
/* get_device_system_crosststamp() callback: sample host+device clocks and
 * convert the device sample to ktime. In real-time mode the sample is a
 * packed {sec (high 32), nsec (low 32)} pair; otherwise it is a raw cycle
 * count run through the timecounter.
 */
static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
				     struct system_counterval_t *sys_counterval,
				     void *ctx)
{
	struct mlx5_core_dev *mdev = ctx;
	bool real_time_mode;
	u64 device;
	int err;

	real_time_mode = mlx5_real_time_mode(mdev);

	err = mlx5_mtctr_read(mdev, real_time_mode, sys_counterval, &device);
	if (err)
		return err;

	if (real_time_mode)
		*device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
	else
		*device_time = mlx5_timecounter_cyc2time(mdev->clock, device);

	return 0;
}
308
/* get_device_system_crosststamp() callback for raw cycles: always samples
 * the free-running counter and returns it untranslated (as "ns" in ktime
 * form) for cross-timestamping against the host clock.
 */
static int
mlx5_mtctr_syncdevicecyclestime(ktime_t *device_time,
				struct system_counterval_t *sys_counterval,
				void *ctx)
{
	struct mlx5_core_dev *mdev = ctx;
	u64 device;
	int err;

	err = mlx5_mtctr_read(mdev, false, sys_counterval, &device);
	if (err)
		return err;
	*device_time = ns_to_ktime(device);

	return 0;
}
325
/* PTP .getcrosststamp callback: produce a correlated device/system
 * timestamp pair via PCIe PTM. Fails with -EBUSY when PTM source time
 * is not currently available.
 */
static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
				   struct system_device_crosststamp *cts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct system_time_snapshot history_begin = {0};
	struct mlx5_core_dev *mdev;
	int err;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);

	if (!mlx5_is_ptm_source_time_available(mdev)) {
		err = -EBUSY;
		goto unlock;
	}

	/* Snapshot taken before sampling, for history interpolation. */
	ktime_get_snapshot(&history_begin);

	err = get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
					    &history_begin, cts);
unlock:
	mlx5_clock_unlock(clock);
	return err;
}
350
/* Like mlx5_ptp_getcrosststamp(), but correlates the raw free-running
 * counter (untranslated cycles) with system time.
 */
static int mlx5_ptp_getcrosscycles(struct ptp_clock_info *ptp,
				   struct system_device_crosststamp *cts)
{
	struct mlx5_clock *clock =
		container_of(ptp, struct mlx5_clock, ptp_info);
	struct system_time_snapshot history_begin = {0};
	struct mlx5_core_dev *mdev;
	int err;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);

	if (!mlx5_is_ptm_source_time_available(mdev)) {
		err = -EBUSY;
		goto unlock;
	}

	ktime_get_snapshot(&history_begin);

	err = get_device_system_crosststamp(mlx5_mtctr_syncdevicecyclestime,
					    mdev, &history_begin, cts);
unlock:
	mlx5_clock_unlock(clock);
	return err;
}
376 #endif /* CONFIG_X86 */
377
/* Read the 64-bit device clock (real-time or internal timer) from the
 * init segment as two 32-bit MMIO reads. The high word is read before
 * and after the low word; a mismatch means the low word wrapped between
 * reads, so the low word is re-read (it now belongs with timer_h1).
 * @sts, when non-NULL, captures system timestamps around the low-word
 * read for PTP_SYS_OFFSET_EXTENDED.
 * Real time is returned as ns; otherwise raw cycles.
 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}
403
/* cyclecounter read callback: sample the free-running internal timer,
 * masked to the counter width.
 */
static u64 read_internal_timer(struct cyclecounter *cc)
{
	struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
	struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
	struct mlx5_core_dev *mdev = mlx5_clock_mdev_get(clock);

	return mlx5_read_time(mdev, NULL, false) & cc->mask;
}
412
/* Republish the timecounter state to the clock-info page shared with
 * userspace. Update protocol is seqlock-like: set the KERNEL_UPDATING
 * bit in 'sign' (with a full barrier) before touching the fields, then
 * advance 'sign' past that bit with a release store so readers can
 * detect and retry a torn read. Callers in this file invoke it while
 * holding clock->lock.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	/* No page mapped for this device — nothing to publish. */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult = timer->cycles.mult;
	clock_info->nsec = timer->tc.nsec;
	clock_info->frac = timer->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
436
/* Deferred work: for each pin with a pending PPS start timestamp,
 * program that timestamp into MTPPS and clear the pending value.
 * The pending value is read-and-cleared under clock->lock; the register
 * write happens outside the seqlock.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_clock_dev_state *clock_state = container_of(work, struct mlx5_clock_dev_state,
								out_work);
	struct mlx5_core_dev *mdev = clock_state->mdev;
	struct mlx5_clock *clock = mdev->clock;
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		/* Zero means no pending event for this pin. */
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
463
/* PTP aux worker: periodically read the timecounter so the free-running
 * cyclecounter never wraps unobserved, then republish the clock-info
 * page. Skipped while the device is in internal error.
 * Returns timer->overflow_period as the delay until the next run.
 */
static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
{
	struct mlx5_core_dev *mdev;
	struct mlx5_timer *timer;
	struct mlx5_clock *clock;
	unsigned long flags;

	clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);
	timer = &clock->timer;

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

out:
	mlx5_clock_unlock(clock);
	return timer->overflow_period;
}
488
mlx5_ptp_settime_real_time(struct mlx5_core_dev * mdev,const struct timespec64 * ts)489 static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
490 const struct timespec64 *ts)
491 {
492 u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
493
494 if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
495 ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
496 return -EINVAL;
497
498 MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
499 MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
500 MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
501
502 return mlx5_set_mtutc(mdev, in, sizeof(in));
503 }
504
/* Set the clock to @ts: push the time to HW first (when MTUTC modify is
 * allowed), then reinitialize the SW timecounter and republish the
 * clock-info page under the seqlock.
 */
static int mlx5_clock_settime(struct mlx5_core_dev *mdev, struct mlx5_clock *clock,
			      const struct timespec64 *ts)
{
	struct mlx5_timer *timer = &clock->timer;
	unsigned long flags;

	if (mlx5_modify_mtutc_allowed(mdev)) {
		int err = mlx5_ptp_settime_real_time(mdev, ts);

		if (err)
			return err;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
525
/* PTP .settime64 callback: resolve the backing device under the clock
 * lock and delegate to mlx5_clock_settime().
 */
static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev;
	int err;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);
	err = mlx5_clock_settime(mdev, clock, ts);
	mlx5_clock_unlock(clock);

	return err;
}
539
540 static
mlx5_ptp_gettimex_real_time(struct mlx5_core_dev * mdev,struct ptp_system_timestamp * sts)541 struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
542 struct ptp_system_timestamp *sts)
543 {
544 struct timespec64 ts;
545 u64 time;
546
547 time = mlx5_read_time(mdev, sts, true);
548 ts = ns_to_timespec64(time);
549 return ts;
550 }
551
/* PTP .gettimex64 callback: in real-time mode read the HW clock directly;
 * otherwise sample the free-running counter and translate it through the
 * timecounter.
 */
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			     struct ptp_system_timestamp *sts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev;
	u64 cycles, ns;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);
	if (mlx5_real_time_mode(mdev)) {
		*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
		goto out;
	}

	cycles = mlx5_read_time(mdev, sts, false);
	ns = mlx5_timecounter_cyc2time(clock, cycles);
	*ts = ns_to_timespec64(ns);
out:
	mlx5_clock_unlock(clock);
	return 0;
}
573
/* PTP .getcyclesx64 callback: report the raw free-running counter value.
 * The cycle count is packed into a timespec64 untranslated (cycles are
 * treated as if they were ns), matching the crosscycles path.
 */
static int mlx5_ptp_getcyclesx(struct ptp_clock_info *ptp,
			       struct timespec64 *ts,
			       struct ptp_system_timestamp *sts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);
	struct mlx5_core_dev *mdev;
	u64 cycles;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);

	cycles = mlx5_read_time(mdev, sts, false);
	*ts = ns_to_timespec64(cycles);
	mlx5_clock_unlock(clock);
	return 0;
}
591
/* Apply a time offset to the HW real-time clock via MTUTC ADJUST_TIME.
 * Deltas beyond the HW adjustment range fall back to a read-modify-set
 * (non-atomic, so a small extra error can accrue between read and set).
 */
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	/* HW time adjustment range is checked. If out of range, settime instead */
	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
		struct timespec64 ts;
		s64 ns;

		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
		ns = timespec64_to_ns(&ts) + delta;
		ts = ns_to_timespec64(ns);
		return mlx5_ptp_settime_real_time(mdev, &ts);
	}

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
	MLX5_SET(mtutc_reg, in, time_adjustment, delta);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
612
/* PTP .adjtime callback: adjust HW (when MTUTC modify is allowed) and
 * always adjust the SW timecounter, then republish the clock-info page.
 */
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err = 0;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);

	if (mlx5_modify_mtutc_allowed(mdev)) {
		err = mlx5_ptp_adjtime_real_time(mdev, delta);

		if (err)
			goto unlock;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&timer->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

unlock:
	mlx5_clock_unlock(clock);
	return err;
}
640
/* PTP .adjphase callback: phase offsets are applied as a HW time
 * adjustment only (no SW timecounter change). The core clamps @delta
 * to the value reported by .getmaxphase, so the in-range MTUTC path
 * is taken.
 */
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev;
	int err;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);
	err = mlx5_ptp_adjtime_real_time(mdev, delta);
	mlx5_clock_unlock(clock);

	return err;
}
654
/* Program a frequency adjustment into MTUTC. Uses native scaled-ppm
 * units when the device supports them and the value fits in 32 bits;
 * otherwise falls back to ppb units.
 */
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);

	if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units) &&
	    scaled_ppm <= S32_MAX && scaled_ppm >= S32_MIN) {
		/* HW scaled_ppm support on mlx5 devices only supports a 32-bit value */
		MLX5_SET(mtutc_reg, in, freq_adj_units,
			 MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
		MLX5_SET(mtutc_reg, in, freq_adjustment, (s32)scaled_ppm);
	} else {
		MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
		MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
	}

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
674
/* PTP .adjfine callback: adjust HW frequency (when allowed) and scale
 * the SW cyclecounter multiplier from its nominal value. The worker is
 * rescheduled so the overflow period stays consistent with the new rate.
 */
static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err = 0;
	u32 mult;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);

	if (mlx5_modify_mtutc_allowed(mdev)) {
		err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);

		if (err)
			goto unlock;
	}

	/* Always derived from the nominal mult so adjustments don't compound. */
	mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);

	write_seqlock_irqsave(&clock->lock, flags);
	/* Fold elapsed time at the old rate before switching multiplier. */
	timecounter_read(&timer->tc);
	timer->cycles.mult = mult;
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);
	ptp_schedule_worker(clock->ptp, timer->overflow_period);

unlock:
	mlx5_clock_unlock(clock);
	return err;
}
707
/* PTP_CLK_REQ_EXTTS handler: enable or disable external timestamp
 * capture on a pin. Validates the request flags and pin assignment,
 * programs MTPPS, then arms/disarms the pin event via MTPPSE.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	struct mlx5_core_dev *mdev;
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	/* The pin must currently be assigned the EXTTS function. */
	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern selects the capture edge: 1 = falling, 0 = rising. */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);

	if (!MLX5_PPS_CAP(mdev)) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		goto unlock;

	/* Arm (on) or disarm (off) the repetitive pin event interrupt. */
	err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);
	if (err)
		goto unlock;

	/* Record the armed state and which device armed the pin. */
	clock->pps_info.pin_armed[pin] = on;
	clock_priv(clock)->event_mdev = mdev;

unlock:
	mlx5_clock_unlock(clock);
	return err;
}
774
/* Translate an absolute target time (ns, timecounter domain) into the
 * device cycle count at which it will occur, by projecting forward from
 * the current counter reading using the cyclecounter mult/shift.
 */
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	/* Invert the ns = cycles * mult >> shift relation. */
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}
795
perout_conf_internal_timer(struct mlx5_core_dev * mdev,s64 sec)796 static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
797 {
798 struct timespec64 ts = {};
799 s64 target_ns;
800
801 ts.tv_sec = sec;
802 target_ns = timespec64_to_ns(&ts);
803
804 return find_target_cycles(mdev, target_ns);
805 }
806
perout_conf_real_time(s64 sec,u32 nsec)807 static u64 perout_conf_real_time(s64 sec, u32 nsec)
808 {
809 return (u64)nsec | (u64)sec << 32;
810 }
811
/* Configure a 1 PPS periodic output: only a 1 s period is accepted
 * (the >>1 comparison tolerates an odd ns remainder of 1). The start
 * timestamp is expressed in the real-time packed format or in internal
 * timer cycles depending on @real_time.
 */
static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
			    u64 *time_stamp, bool real_time)
{
	struct timespec64 ts;
	s64 ns;

	ts.tv_nsec = rq->perout.period.nsec;
	ts.tv_sec = rq->perout.period.sec;
	ns = timespec64_to_ns(&ts);

	if ((ns >> 1) != 500000000LL)
		return -EINVAL;

	/* Start on a whole-second boundary; ns part of 'start' is ignored. */
	*time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
		      perout_conf_internal_timer(mdev, rq->perout.start.sec);

	return 0;
}
830
/* Maximum value representable in the MTPPS out_pulse_duration_ns field. */
#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)

/* Compute the NPPS output pulse duration (ns): the requested duty-cycle
 * 'on' time when PTP_PEROUT_DUTY_CYCLE is set, otherwise half the period.
 * Rejects durations outside [min_out_pulse_duration_ns, field max].
 */
static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
					       struct ptp_clock_request *rq,
					       u32 *out_pulse_duration_ns)
{
	struct mlx5_pps *pps_info = &mdev->clock->pps_info;
	u32 out_pulse_duration;
	struct timespec64 ts;

	if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
		ts.tv_sec = rq->perout.on.sec;
		ts.tv_nsec = rq->perout.on.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts);
	} else {
		/* out_pulse_duration_ns should be up to 50% of the
		 * pulse period as default
		 */
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
	}

	if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
	    out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
		mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
			      out_pulse_duration, pps_info->min_out_pulse_duration_ns,
			      MLX5_MAX_PULSE_DURATION);
		return -EINVAL;
	}
	*out_pulse_duration_ns = out_pulse_duration;

	return 0;
}
864
/* Build the NPPS (arbitrary-period pulse output) configuration for
 * real-time mode: validate the period against the device minimum,
 * compute the pulse duration, and pack period/start in the real-time
 * format. Adds the NPPS field_select bits for the caller's MTPPS write.
 */
static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
				      u32 *field_select, u32 *out_pulse_duration_ns,
				      u64 *period, u64 *time_stamp)
{
	struct mlx5_pps *pps_info = &mdev->clock->pps_info;
	struct ptp_clock_time *time = &rq->perout.start;
	struct timespec64 ts;

	ts.tv_sec = rq->perout.period.sec;
	ts.tv_nsec = rq->perout.period.nsec;
	if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
		mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
			      pps_info->min_npps_period);
		return -EINVAL;
	}
	*period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);

	if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
		return -EINVAL;

	*time_stamp = perout_conf_real_time(time->sec, time->nsec);
	*field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
			 MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;

	return 0;
}
891
/* PTP_CLK_REQ_PEROUT handler: configure a pin as a periodic output.
 * Uses the NPPS path (arbitrary period) when supported, otherwise only
 * 1 PPS. Programs MTPPS and, except in real-time mode, arms the pin
 * event via MTPPSE.
 */
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 out_pulse_duration_ns = 0;
	struct mlx5_core_dev *mdev;
	u32 field_select = 0;
	u64 npps_period = 0;
	u64 time_stamp = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	bool rt_mode;
	int pin = -1;
	int err = 0;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	field_select = MLX5_MTPPS_FS_ENABLE;
	/* The pin must currently be assigned the PEROUT function. */
	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
	if (pin < 0)
		return -EBUSY;

	mlx5_clock_lock(clock);
	mdev = mlx5_clock_mdev_get(clock);
	rt_mode = mlx5_real_time_mode(mdev);

	if (!MLX5_PPS_CAP(mdev)) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (on) {
		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;

		/* Real-time start seconds must fit the 32-bit packed field. */
		if (rt_mode && rq->perout.start.sec > U32_MAX) {
			err = -EINVAL;
			goto unlock;
		}

		field_select |= MLX5_MTPPS_FS_PIN_MODE |
				MLX5_MTPPS_FS_PATTERN |
				MLX5_MTPPS_FS_TIME_STAMP;

		if (mlx5_npps_real_time_supported(mdev))
			err = perout_conf_npps_real_time(mdev, rq, &field_select,
							 &out_pulse_duration_ns, &npps_period,
							 &time_stamp);
		else
			err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
		if (err)
			goto unlock;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);
	MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
	MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		goto unlock;

	/* No pin event needed in real-time mode. */
	if (rt_mode)
		goto unlock;

	err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on);

unlock:
	mlx5_clock_unlock(clock);
	return err;
}
971
mlx5_pps_configure(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)972 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
973 struct ptp_clock_request *rq,
974 int on)
975 {
976 struct mlx5_clock *clock =
977 container_of(ptp, struct mlx5_clock, ptp_info);
978
979 clock->pps_info.enabled = !!on;
980 return 0;
981 }
982
mlx5_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)983 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
984 struct ptp_clock_request *rq,
985 int on)
986 {
987 switch (rq->type) {
988 case PTP_CLK_REQ_EXTTS:
989 return mlx5_extts_configure(ptp, rq, on);
990 case PTP_CLK_REQ_PEROUT:
991 return mlx5_perout_configure(ptp, rq, on);
992 case PTP_CLK_REQ_PPS:
993 return mlx5_pps_configure(ptp, rq, on);
994 default:
995 return -EOPNOTSUPP;
996 }
997 return 0;
998 }
999
/* Bit layout of the MTPPS register cap_pin_<i>_mode fields: which PPS
 * directions a pin supports. Consumed by mlx5_ptp_verify().
 */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
1004
mlx5_ptp_verify(struct ptp_clock_info * ptp,unsigned int pin,enum ptp_pin_function func,unsigned int chan)1005 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
1006 enum ptp_pin_function func, unsigned int chan)
1007 {
1008 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
1009 ptp_info);
1010
1011 switch (func) {
1012 case PTP_PF_NONE:
1013 return 0;
1014 case PTP_PF_EXTTS:
1015 return !(clock->pps_info.pin_caps[pin] &
1016 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
1017 case PTP_PF_PEROUT:
1018 return !(clock->pps_info.pin_caps[pin] &
1019 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
1020 default:
1021 return -EOPNOTSUPP;
1022 }
1023 }
1024
/* Template ptp_clock_info, copied into each mlx5_clock by
 * mlx5_init_timer_clock(). The pin/PPS related fields (n_pins, pps,
 * enable, verify) are left disabled here and populated later by
 * mlx5_get_pps_caps()/mlx5_init_pin_config() when the device has 1PPS
 * support.
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner = THIS_MODULE,
	.name = "mlx5_ptp",
	/* Default; overridden from the MTUTC register by
	 * mlx5_init_timer_max_freq_adjustment() when available.
	 */
	.max_adj = 50000000,
	.n_alarm = 0,
	.n_ext_ts = 0,
	.n_per_out = 0,
	.n_pins = 0,
	.pps = 0,
	.adjfine = mlx5_ptp_adjfine,
	.adjphase = mlx5_ptp_adjphase,
	.getmaxphase = mlx5_ptp_getmaxphase,
	.adjtime = mlx5_ptp_adjtime,
	.gettimex64 = mlx5_ptp_gettimex,
	.settime64 = mlx5_ptp_settime,
	.enable = NULL,
	.verify = NULL,
	.do_aux_work = mlx5_timestamp_overflow,
};
1044
/* Read the MTPPS register contents for a single @pin into @mtpps.
 * Returns 0 on success or the access_reg error code.
 */
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 query[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	/* Select which pin the query applies to. */
	MLX5_SET(mtpps_reg, query, pin, pin);

	return mlx5_core_access_reg(mdev, query, sizeof(query), mtpps,
				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
1055
/* Translate the current hardware configuration of @pin into a PTP pin
 * function. Disabled pins, query failures and unknown modes all map to
 * PTP_PF_NONE.
 */
static int mlx5_get_pps_pin_mode(struct mlx5_core_dev *mdev, u8 pin)
{
	u32 mtpps[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	if (mlx5_query_mtpps_pin_mode(mdev, pin, mtpps, sizeof(mtpps)))
		return PTP_PF_NONE;

	if (!MLX5_GET(mtpps_reg, mtpps, enable))
		return PTP_PF_NONE;

	switch (MLX5_GET(mtpps_reg, mtpps, pin_mode)) {
	case MLX5_PIN_MODE_IN:
		return PTP_PF_EXTTS;
	case MLX5_PIN_MODE_OUT:
		return PTP_PF_PEROUT;
	default:
		return PTP_PF_NONE;
	}
}
1075
mlx5_init_pin_config(struct mlx5_core_dev * mdev)1076 static void mlx5_init_pin_config(struct mlx5_core_dev *mdev)
1077 {
1078 struct mlx5_clock *clock = mdev->clock;
1079 int i;
1080
1081 if (!clock->ptp_info.n_pins)
1082 return;
1083
1084 clock->ptp_info.pin_config =
1085 kcalloc(clock->ptp_info.n_pins,
1086 sizeof(*clock->ptp_info.pin_config),
1087 GFP_KERNEL);
1088 if (!clock->ptp_info.pin_config)
1089 return;
1090 clock->ptp_info.enable = mlx5_ptp_enable;
1091 clock->ptp_info.verify = mlx5_ptp_verify;
1092 clock->ptp_info.pps = 1;
1093
1094 clock->ptp_info.supported_extts_flags = PTP_RISING_EDGE |
1095 PTP_FALLING_EDGE |
1096 PTP_STRICT_FLAGS;
1097
1098 if (mlx5_npps_real_time_supported(mdev))
1099 clock->ptp_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
1100
1101 for (i = 0; i < clock->ptp_info.n_pins; i++) {
1102 snprintf(clock->ptp_info.pin_config[i].name,
1103 sizeof(clock->ptp_info.pin_config[i].name),
1104 "mlx5_pps%d", i);
1105 clock->ptp_info.pin_config[i].index = i;
1106 clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(mdev, i);
1107 clock->ptp_info.pin_config[i].chan = 0;
1108 }
1109 }
1110
/* Query the MTPPS register and cache the device's 1PPS capabilities:
 * pin counts, minimum NPPS period / output pulse duration (only when
 * the MCAM features advertise those fields), and the per-pin mode
 * capability bits later consumed by mlx5_ptp_verify().
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	struct mlx5_clock *clock = mdev->clock;

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	/* The register reports log2 of the minimum values; expand them. */
	if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
		clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_npps_period);
	if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
		clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
									  cap_log_min_out_pulse_duration_ns);

	/* Per-pin direction capabilities (IN/OUT support bits). */
	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
1141
ts_next_sec(struct timespec64 * ts)1142 static void ts_next_sec(struct timespec64 *ts)
1143 {
1144 ts->tv_sec += 1;
1145 ts->tv_nsec = 0;
1146 }
1147
perout_conf_next_event_timer(struct mlx5_core_dev * mdev,struct mlx5_clock * clock)1148 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
1149 struct mlx5_clock *clock)
1150 {
1151 struct timespec64 ts;
1152 s64 target_ns;
1153
1154 mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
1155 ts_next_sec(&ts);
1156 target_ns = timespec64_to_ns(&ts);
1157
1158 return find_target_cycles(mdev, target_ns);
1159 }
1160
/* EQ notifier for hardware PPS events.
 *
 * For an input (EXTTS) pin the event timestamp is converted from device
 * cycles to nanoseconds and forwarded to the PTP core, either as a
 * PPSUSR event (when PPS reporting was enabled via mlx5_pps_configure())
 * or as a plain EXTTS event. For an output (PEROUT) pin the next-second
 * re-arm value is computed and the actual register write is deferred to
 * the out_work worker.
 */
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock_dev_state *clock_state = mlx5_nb_cof(nb, struct mlx5_clock_dev_state,
							       pps_nb);
	struct mlx5_core_dev *mdev = clock_state->mdev;
	struct mlx5_clock *clock = mdev->clock;
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	unsigned long flags;
	u64 ns;

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		/* Real-time mode timestamps use a different cycles->ns
		 * conversion than free-running counter timestamps.
		 */
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
						be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
						  be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
				ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		/* PPS-out events are not expected on a shared clock. */
		if (clock->shared) {
			mlx5_core_warn(mdev, " Received unexpected PPS out event\n");
			break;
		}
		ns = perout_conf_next_event_timer(mdev, clock);
		/* Publish the new start cycle under the seqlock so readers
		 * never see a torn value.
		 */
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		schedule_work(&clock_state->out_work);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}
1210
mlx5_timecounter_init(struct mlx5_core_dev * mdev)1211 static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
1212 {
1213 struct mlx5_clock *clock = mdev->clock;
1214 struct mlx5_timer *timer = &clock->timer;
1215 u32 dev_freq;
1216
1217 dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
1218 timer->cycles.read = read_internal_timer;
1219 timer->cycles.shift = mlx5_ptp_shift_constant(dev_freq);
1220 timer->cycles.mult = clocksource_khz2mult(dev_freq,
1221 timer->cycles.shift);
1222 timer->nominal_c_mult = timer->cycles.mult;
1223 timer->cycles.mask = CLOCKSOURCE_MASK(41);
1224
1225 timecounter_init(&timer->tc, &timer->cycles,
1226 ktime_to_ns(ktime_get_real()));
1227 }
1228
/* Compute the period (in jiffies) at which the overflow watchdog must
 * run so the timecounter is read often enough to never miss a counter
 * wrap-around, and export it to the shared clock-info page.
 */
static void mlx5_init_overflow_period(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	/* Convert nanoseconds to jiffies. */
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	/* Degenerate configuration (e.g. huge mult): fall back to a 1 s
	 * period rather than never scheduling the watchdog.
	 */
	if (!timer->overflow_period) {
		timer->overflow_period = HZ;
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is scheduled once per second\n");
	}

	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}
1262
mlx5_init_clock_info(struct mlx5_core_dev * mdev)1263 static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
1264 {
1265 struct mlx5_clock *clock = mdev->clock;
1266 struct mlx5_ib_clock_info *info;
1267 struct mlx5_timer *timer;
1268
1269 mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
1270 if (!mdev->clock_info) {
1271 mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
1272 return;
1273 }
1274
1275 info = mdev->clock_info;
1276 timer = &clock->timer;
1277
1278 info->nsec = timer->tc.nsec;
1279 info->cycles = timer->tc.cycle_last;
1280 info->mask = timer->cycles.mask;
1281 info->mult = timer->nominal_c_mult;
1282 info->shift = timer->cycles.shift;
1283 info->frac = timer->tc.frac;
1284 }
1285
mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev * mdev)1286 static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
1287 {
1288 struct mlx5_clock *clock = mdev->clock;
1289 u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
1290 u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
1291 u8 log_max_freq_adjustment = 0;
1292 int err;
1293
1294 err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
1295 MLX5_REG_MTUTC, 0, 0);
1296 if (!err)
1297 log_max_freq_adjustment =
1298 MLX5_GET(mtutc_reg, out, log_max_freq_adjustment);
1299
1300 if (log_max_freq_adjustment)
1301 clock->ptp_info.max_adj =
1302 min(S32_MAX, 1 << log_max_freq_adjustment);
1303 }
1304
/* Configure the PHC for this device: install the base callbacks,
 * optional cross-timestamping on x86 systems with ART, the internal
 * timecounter, the shared clock-info page and the overflow watchdog
 * period. In real-time mode the hardware clock is also set from the
 * current system time.
 */
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = mdev->clock;
	bool expose_cycles;

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	if (MLX5_CAP_MCAM_REG(mdev, mtutc))
		mlx5_init_timer_max_freq_adjustment(mdev);

	/* Raw cycle reads are only exposed when the free-running counter
	 * is not disciplined, or when real-time mode is off.
	 */
	expose_cycles = !MLX5_CAP_GEN(mdev, disciplined_fr_counter) ||
			!mlx5_real_time_mode(mdev);

#ifdef CONFIG_X86
	/* Cross-timestamping requires MTPTM/MTCTR register support and
	 * the Always Running Timer (ART) on the host CPU.
	 */
	if (MLX5_CAP_MCAM_REG3(mdev, mtptm) &&
	    MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART)) {
		clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp;
		if (expose_cycles)
			clock->ptp_info.getcrosscycles =
				mlx5_ptp_getcrosscycles;
	}
#endif /* CONFIG_X86 */

	if (expose_cycles)
		clock->ptp_info.getcyclesx64 = mlx5_ptp_getcyclesx;

	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(mdev);

	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_clock_settime(mdev, clock, &ts);
	}
}
1343
/* Discover 1PPS capabilities and build the pin table, when supported. */
static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
	if (MLX5_PPS_CAP(mdev)) {
		mlx5_get_pps_caps(mdev);
		mlx5_init_pin_config(mdev);
	}
}
1352
mlx5_init_clock_dev(struct mlx5_core_dev * mdev)1353 static void mlx5_init_clock_dev(struct mlx5_core_dev *mdev)
1354 {
1355 struct mlx5_clock *clock = mdev->clock;
1356
1357 seqlock_init(&clock->lock);
1358
1359 /* Initialize the device clock */
1360 mlx5_init_timer_clock(mdev);
1361
1362 /* Initialize 1PPS data structures */
1363 mlx5_init_pps(mdev);
1364
1365 clock->ptp = ptp_clock_register(&clock->ptp_info,
1366 clock->shared ? NULL : &mdev->pdev->dev);
1367 if (IS_ERR(clock->ptp)) {
1368 mlx5_core_warn(mdev, "%sptp_clock_register failed %pe\n",
1369 clock->shared ? "shared clock " : "",
1370 clock->ptp);
1371 clock->ptp = NULL;
1372 }
1373
1374 if (clock->ptp)
1375 ptp_schedule_worker(clock->ptp, 0);
1376 }
1377
/* Tear down what mlx5_init_clock_dev() created: unregister the PTP
 * clock, release the shared clock-info page and free the pin table.
 * pin_config is freed last since the PTP core references it until
 * unregistration completes.
 */
static void mlx5_destroy_clock_dev(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = mdev->clock;

	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}
1394
mlx5_clock_free(struct mlx5_core_dev * mdev)1395 static void mlx5_clock_free(struct mlx5_core_dev *mdev)
1396 {
1397 struct mlx5_clock_priv *cpriv = clock_priv(mdev->clock);
1398
1399 mlx5_destroy_clock_dev(mdev);
1400 mutex_destroy(&cpriv->lock);
1401 kfree(cpriv);
1402 mdev->clock = NULL;
1403 }
1404
/* Allocate a clock container for @mdev and initialize the clock device
 * under its lock. For a shared clock the PTP device is mandatory:
 * failure to create it tears everything back down and returns -EINVAL.
 */
static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared)
{
	struct mlx5_clock_priv *cpriv;
	struct mlx5_clock *clock;

	cpriv = kzalloc(sizeof(*cpriv), GFP_KERNEL);
	if (!cpriv)
		return -ENOMEM;

	mutex_init(&cpriv->lock);
	cpriv->mdev = mdev;
	clock = &cpriv->clock;
	clock->shared = shared;
	mdev->clock = clock;

	mlx5_clock_lock(clock);
	mlx5_init_clock_dev(mdev);
	mlx5_clock_unlock(clock);

	if (shared && !clock->ptp) {
		mlx5_core_warn(mdev, "failed to create ptp dev shared by multiple functions");
		mlx5_clock_free(mdev);
		return -EINVAL;
	}

	return 0;
}
1434
/* Join the devcom component keyed by the real-time clock identity. If a
 * peer function already owns a clock with this key, reuse it (including
 * its clock-info page); otherwise allocate a new shared clock. On any
 * failure mdev->clock stays NULL and the compdev is released, so the
 * caller falls back to a per-function clock.
 */
static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
{
	struct mlx5_core_dev *peer_dev, *next = NULL;
	struct mlx5_devcom_match_attr attr = {
		.key.val = key,
	};
	struct mlx5_devcom_comp_dev *compd;
	struct mlx5_devcom_comp_dev *pos;

	compd = mlx5_devcom_register_component(mdev->priv.devc,
					       MLX5_DEVCOM_SHARED_CLOCK,
					       &attr, NULL, mdev);
	if (!compd)
		return;

	mdev->clock_state->compdev = compd;

	mlx5_devcom_comp_lock(mdev->clock_state->compdev);
	/* Look for a peer that already carries a clock for this key. */
	mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
		if (peer_dev->clock) {
			next = peer_dev;
			break;
		}
	}

	if (next) {
		mdev->clock = next->clock;
		/* clock info is shared among all the functions using the same clock */
		mdev->clock_info = next->clock_info;
	} else {
		mlx5_clock_alloc(mdev, true);
	}
	mlx5_devcom_comp_unlock(mdev->clock_state->compdev);

	if (!mdev->clock) {
		mlx5_devcom_unregister_component(mdev->clock_state->compdev);
		mdev->clock_state->compdev = NULL;
	}
}
1474
/* Detach @mdev from the shared clock component. If another function
 * still references the clock, hand the cpriv->mdev role over to that
 * peer and keep the backing allocation alive; otherwise free the clock
 * entirely. Runs under the devcom component lock.
 */
static void mlx5_shared_clock_unregister(struct mlx5_core_dev *mdev)
{
	struct mlx5_core_dev *peer_dev, *next = NULL;
	struct mlx5_clock *clock = mdev->clock;
	struct mlx5_devcom_comp_dev *pos;

	mlx5_devcom_comp_lock(mdev->clock_state->compdev);
	/* Find any surviving peer that still uses this clock. */
	mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
		if (peer_dev->clock && peer_dev != mdev) {
			next = peer_dev;
			break;
		}
	}

	if (next) {
		struct mlx5_clock_priv *cpriv = clock_priv(clock);

		mlx5_clock_lock(clock);
		/* Transfer clock ownership to the surviving peer. */
		if (mdev == cpriv->mdev)
			cpriv->mdev = next;
		mlx5_clock_unlock(clock);
	} else {
		mlx5_clock_free(mdev);
	}

	mdev->clock = NULL;
	mdev->clock_info = NULL;
	mlx5_devcom_comp_unlock(mdev->clock_state->compdev);

	mlx5_devcom_unregister_component(mdev->clock_state->compdev);
}
1506
/* Re-route armed PPS-in (EXTTS) pin events between functions.
 *
 * For every pin configured as EXTTS and marked armed: enable the event
 * on @new_mdev (if given) and record it as the event receiver in
 * cpriv->event_mdev, then disable the event on @old_mdev (if given).
 * Either argument may be NULL to only arm or only disarm.
 */
static void mlx5_clock_arm_pps_in_event(struct mlx5_clock *clock,
					struct mlx5_core_dev *new_mdev,
					struct mlx5_core_dev *old_mdev)
{
	struct ptp_clock_info *ptp_info = &clock->ptp_info;
	struct mlx5_clock_priv *cpriv = clock_priv(clock);
	int i;

	for (i = 0; i < ptp_info->n_pins; i++) {
		if (ptp_info->pin_config[i].func != PTP_PF_EXTTS ||
		    !clock->pps_info.pin_armed[i])
			continue;

		/* Arm on the new function before disarming the old one. */
		if (new_mdev) {
			mlx5_set_mtppse(new_mdev, i, 0, MLX5_EVENT_MODE_REPETETIVE);
			cpriv->event_mdev = new_mdev;
		} else {
			cpriv->event_mdev = NULL;
		}

		if (old_mdev)
			mlx5_set_mtppse(old_mdev, i, 0, MLX5_EVENT_MODE_DISABLE);
	}
}
1531
/* Attach the per-function PPS machinery: out_work, the PPS EQ notifier
 * and, where appropriate, re-arming of any armed PPS-in pins. For a
 * shared clock only the owning function (cpriv->mdev) arms the events,
 * under both the devcom component lock and the clock lock.
 */
void mlx5_clock_load(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = mdev->clock;
	struct mlx5_clock_priv *cpriv;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	INIT_WORK(&mdev->clock_state->out_work, mlx5_pps_out);
	MLX5_NB_INIT(&mdev->clock_state->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &mdev->clock_state->pps_nb);

	if (!clock->shared) {
		/* Function-private clock: this function owns all events. */
		mlx5_clock_arm_pps_in_event(clock, mdev, NULL);
		return;
	}

	cpriv = clock_priv(clock);
	mlx5_devcom_comp_lock(mdev->clock_state->compdev);
	mlx5_clock_lock(clock);
	/* Only the owning function arms, and only when it is not already
	 * the current event receiver.
	 */
	if (mdev == cpriv->mdev && mdev != cpriv->event_mdev)
		mlx5_clock_arm_pps_in_event(clock, mdev, cpriv->event_mdev);
	mlx5_clock_unlock(clock);
	mlx5_devcom_comp_unlock(mdev->clock_state->compdev);
}
1557
/* Detach the per-function PPS machinery. If this function currently
 * receives the PPS-in events of a shared clock, hand them over to a
 * surviving peer (or disarm them if none exists) before unregistering
 * the notifier and flushing out_work.
 */
void mlx5_clock_unload(struct mlx5_core_dev *mdev)
{
	struct mlx5_core_dev *peer_dev, *next = NULL;
	struct mlx5_clock *clock = mdev->clock;
	struct mlx5_devcom_comp_dev *pos;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	if (!clock->shared) {
		/* Function-private clock: just disarm this function. */
		mlx5_clock_arm_pps_in_event(clock, NULL, mdev);
		goto out;
	}

	mlx5_devcom_comp_lock(mdev->clock_state->compdev);
	/* Find a peer that can take over the armed events, if any. */
	mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
		if (peer_dev->clock && peer_dev != mdev) {
			next = peer_dev;
			break;
		}
	}

	mlx5_clock_lock(clock);
	if (mdev == clock_priv(clock)->event_mdev)
		mlx5_clock_arm_pps_in_event(clock, next, mdev);
	mlx5_clock_unlock(clock);
	mlx5_devcom_comp_unlock(mdev->clock_state->compdev);

out:
	mlx5_eq_notifier_unregister(mdev, &mdev->clock_state->pps_nb);
	cancel_work_sync(&mdev->clock_state->out_work);
}
1590
/* Placeholder clock assigned by mlx5_init_clock() when the device
 * reports no valid frequency capability.
 */
static struct mlx5_clock null_clock;
1592
mlx5_init_clock(struct mlx5_core_dev * mdev)1593 int mlx5_init_clock(struct mlx5_core_dev *mdev)
1594 {
1595 u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE];
1596 struct mlx5_clock_dev_state *clock_state;
1597 u64 key;
1598 int err;
1599
1600 if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
1601 mdev->clock = &null_clock;
1602 mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
1603 return 0;
1604 }
1605
1606 clock_state = kzalloc(sizeof(*clock_state), GFP_KERNEL);
1607 if (!clock_state)
1608 return -ENOMEM;
1609 clock_state->mdev = mdev;
1610 mdev->clock_state = clock_state;
1611
1612 if (MLX5_CAP_MCAM_REG3(mdev, mrtcq) && mlx5_real_time_mode(mdev)) {
1613 if (mlx5_clock_identity_get(mdev, identity)) {
1614 mlx5_core_warn(mdev, "failed to get rt clock identity, create ptp dev per function\n");
1615 } else {
1616 memcpy(&key, &identity, sizeof(key));
1617 mlx5_shared_clock_register(mdev, key);
1618 }
1619 }
1620
1621 if (!mdev->clock) {
1622 err = mlx5_clock_alloc(mdev, false);
1623 if (err) {
1624 kfree(clock_state);
1625 mdev->clock_state = NULL;
1626 return err;
1627 }
1628 }
1629
1630 return 0;
1631 }
1632
mlx5_cleanup_clock(struct mlx5_core_dev * mdev)1633 void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
1634 {
1635 if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
1636 return;
1637
1638 if (mdev->clock->shared)
1639 mlx5_shared_clock_unregister(mdev);
1640 else
1641 mlx5_clock_free(mdev);
1642 kfree(mdev->clock_state);
1643 mdev->clock_state = NULL;
1644 }
1645