// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */

#include <linux/ptp_clock_kernel.h>
#include <linux/clocksource.h>
#include <linux/timecounter.h>
#include <linux/spinlock.h>
#include <linux/device.h>

#include "spectrum_ptp.h"
#include "core.h"

#define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT	29
#define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ		156257 /* 6.4 ns cycle time */
#define MLXSW_SP1_PTP_CLOCK_MASK		64
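
/* The Spectrum-1 free-running clock (FRC) ticks at 156.257 MHz, i.e. a
 * cycle time of roughly 6.4 ns. With a shift of 29, clocksource_khz2mult()
 * turns that frequency into a multiplier of roughly 3.44e9, so that
 * ns = (cycles * mult) >> shift recovers the ~6.4 ns period. The counter
 * is treated as a full 64-bit value (CLOCKSOURCE_MASK(64)).
 */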

struct mlxsw_sp_ptp_clock {
	struct mlxsw_core *core;
	spinlock_t lock; /* protect this structure */
	struct cyclecounter cycles;
	struct timecounter tc;
	u32 nominal_c_mult;
	struct ptp_clock *ptp;
	struct ptp_clock_info ptp_info;
	unsigned long overflow_period;
	struct delayed_work overflow_work;
};
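
/* The cyclecounter/timecounter pair converts the raw free-running counter
 * into nanoseconds: the cyclecounter supplies the read callback and the
 * mult/shift pair (ns ~= (cycles * mult) >> shift), while the timecounter
 * accumulates those deltas into a monotonically growing nanosecond value.
 * nominal_c_mult keeps the unadjusted multiplier, so frequency adjustments
 * in adjfine are always applied relative to the nominal rate instead of
 * compounding on top of earlier adjustments.
 */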

static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
				    struct ptp_system_timestamp *sts)
{
	struct mlxsw_core *mlxsw_core = clock->core;
	u32 frc_h1, frc_h2, frc_l;

	frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
	ptp_read_system_prets(sts);
	frc_l = mlxsw_core_read_frc_l(mlxsw_core);
	ptp_read_system_postts(sts);
	frc_h2 = mlxsw_core_read_frc_h(mlxsw_core);

	if (frc_h1 != frc_h2) {
		/* The low word wrapped between the two reads of the high
		 * word; re-read it so that it is consistent with frc_h2.
		 */
		ptp_read_system_prets(sts);
		frc_l = mlxsw_core_read_frc_l(mlxsw_core);
		ptp_read_system_postts(sts);
	}

	return (u64) frc_l | (u64) frc_h2 << 32;
}
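
/* The 64-bit FRC is read as two 32-bit halves, so the reader above has to
 * guard against the low word wrapping (and the high word incrementing)
 * between the two accesses. Reading high/low/high and re-reading the low
 * word when the two high reads differ guarantees that the returned value
 * pairs a low word with its matching high word. The
 * ptp_read_system_prets()/ptp_read_system_postts() calls bracket the low
 * word read so that the PTP core can correlate device time with system
 * time for PTP_SYS_OFFSET_EXTENDED requests.
 */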

static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(cc, struct mlxsw_sp_ptp_clock, cycles);

	return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
}

static int
mlxsw_sp1_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
{
	struct mlxsw_core *mlxsw_core = clock->core;
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];

	mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
			     freq_adj, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}

static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
{
	u64 cycles = (u64) nsec;

	cycles <<= tc->cc->shift;
	cycles = div_u64(cycles, tc->cc->mult);

	return cycles;
}
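
/* mlxsw_sp1_ptp_ns2cycles() is the inverse of cyclecounter_cyc2ns():
 * given ns ~= (cycles * mult) >> shift, then cycles ~= (ns << shift) / mult.
 * At the nominal rate (mult of roughly 3.44e9, shift of 29) one nanosecond
 * maps to roughly 0.156 cycles, i.e. the expected ~6.4 ns per cycle.
 */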

static int
mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
{
	struct mlxsw_core *mlxsw_core = clock->core;
	u64 next_sec, next_sec_in_nsec, cycles;
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];
	char mtpps_pl[MLXSW_REG_MTPPS_LEN];
	int err;

	next_sec = div_u64(nsec, NSEC_PER_SEC) + 1;
	next_sec_in_nsec = next_sec * NSEC_PER_SEC;

	spin_lock(&clock->lock);
	cycles = mlxsw_sp1_ptp_ns2cycles(&clock->tc, next_sec_in_nsec);
	spin_unlock(&clock->lock);

	mlxsw_reg_mtpps_vpin_pack(mtpps_pl, cycles);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpps), mtpps_pl);
	if (err)
		return err;

	mlxsw_reg_mtutc_pack(mtutc_pl,
			     MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
			     0, next_sec);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
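
/* Setting the PHC is a two-step sequence: the FRC cycle count that
 * corresponds to the next whole second is derived from the software
 * timecounter and programmed through the MTPPS virtual pin, and MTUTC is
 * then armed with MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC so that,
 * as the operation name suggests, the hardware UTC clock is loaded with
 * next_sec when the FRC reaches that cycle count. This way the hardware
 * clock is always set on a whole-second boundary.
 */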

static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	int neg_adj = 0;
	u32 diff;
	u64 adj;
	s32 ppb;

	ppb = scaled_ppm_to_ppb(scaled_ppm);

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	adj = clock->nominal_c_mult;
	adj *= ppb;
	diff = div_u64(adj, NSEC_PER_SEC);

	spin_lock(&clock->lock);
	timecounter_read(&clock->tc);
	clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
				       clock->nominal_c_mult + diff;
	spin_unlock(&clock->lock);

	return mlxsw_sp1_ptp_phc_adjfreq(clock, neg_adj ? -ppb : ppb);
}
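
/* adjfine scales the software multiplier and the hardware frequency by the
 * same amount. scaled_ppm_to_ppb() converts the ppm value with a 16-bit
 * fractional part received from user space into parts per billion, and the
 * multiplier delta is diff = nominal_c_mult * ppb / NSEC_PER_SEC. For
 * example, a requested +10 ppm (ppb = 10000) with a nominal multiplier of
 * roughly 3.44e9 gives a diff of roughly 3.4e4, i.e. 10 ppm of the
 * multiplier.
 */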

static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	u64 nsec;

	spin_lock(&clock->lock);
	timecounter_adjtime(&clock->tc, delta);
	nsec = timecounter_read(&clock->tc);
	spin_unlock(&clock->lock);

	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
}

static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	u64 cycles, nsec;

	spin_lock(&clock->lock);
	cycles = __mlxsw_sp1_ptp_read_frc(clock, sts);
	nsec = timecounter_cyc2time(&clock->tc, cycles);
	spin_unlock(&clock->lock);

	*ts = ns_to_timespec64(nsec);

	return 0;
}

static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	u64 nsec = timespec64_to_ns(ts);

	spin_lock(&clock->lock);
	timecounter_init(&clock->tc, &clock->cycles, nsec);
	nsec = timecounter_read(&clock->tc);
	spin_unlock(&clock->lock);

	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
}

static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlxsw_sp_clock",
	.max_adj	= 100000000,
	.adjfine	= mlxsw_sp1_ptp_adjfine,
	.adjtime	= mlxsw_sp1_ptp_adjtime,
	.gettimex64	= mlxsw_sp1_ptp_gettimex,
	.settime64	= mlxsw_sp1_ptp_settime,
};
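
/* gettimex64 (rather than gettime64) is provided so that the PTP core can
 * use the system timestamps captured around the FRC read to serve
 * PTP_SYS_OFFSET_EXTENDED, giving user space a tighter bound on the
 * PHC/system clock offset. max_adj is expressed in parts per billion.
 */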

static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_ptp_clock *clock;

	clock = container_of(dwork, struct mlxsw_sp_ptp_clock, overflow_work);

	spin_lock(&clock->lock);
	timecounter_read(&clock->tc);
	spin_unlock(&clock->lock);
	mlxsw_core_schedule_dw(&clock->overflow_work, clock->overflow_period);
}
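
/* The overflow worker only needs to call timecounter_read(): reading the
 * timecounter refreshes its internal cycle_last snapshot, so the next
 * cycle delta can never grow large enough to overflow the cycles * mult
 * product or to wrap the counter mask undetected.
 */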

struct mlxsw_sp_ptp_clock *
mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
	u64 overflow_cycles, nsec, frac = 0;
	struct mlxsw_sp_ptp_clock *clock;
	int err;

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&clock->lock);
	clock->cycles.read = mlxsw_sp1_ptp_read_frc;
	clock->cycles.shift = MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT;
	clock->cycles.mult = clocksource_khz2mult(MLXSW_SP1_PTP_CLOCK_FREQ_KHZ,
						  clock->cycles.shift);
	clock->nominal_c_mult = clock->cycles.mult;
	clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
	clock->core = mlxsw_sp->core;

	timecounter_init(&clock->tc, &clock->cycles,
			 ktime_to_ns(ktime_get_real()));

	/* Calculate the period at which to schedule the overflow watchdog,
	 * so that the counter is read at least twice per wrap-around.
	 * The period is derived from the minimum of the maximum HW cycle
	 * count (the clock source mask) and the largest cycle count that can
	 * be multiplied by the clock multiplier without overflowing 64 bits.
	 */
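	/* With the nominal multiplier of roughly 3.44e9, the 2^63 / mult term
	 * below comes out at roughly 2.7e9 cycles, well below mask / 3 for a
	 * 64-bit counter. That corresponds to roughly 2^34 ns, so the
	 * overflow worker ends up running about every 17 seconds.
	 */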
	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));

	nsec = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, 0, &frac);
	clock->overflow_period = nsecs_to_jiffies(nsec);

	INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
	mlxsw_core_schedule_dw(&clock->overflow_work, 0);

	clock->ptp_info = mlxsw_sp1_ptp_clock_info;
	clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
	if (IS_ERR(clock->ptp)) {
		err = PTR_ERR(clock->ptp);
		dev_err(dev, "ptp_clock_register failed %d\n", err);
		goto err_ptp_clock_register;
	}

	return clock;

err_ptp_clock_register:
	cancel_delayed_work_sync(&clock->overflow_work);
	kfree(clock);
	return ERR_PTR(err);
}

void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
{
	ptp_clock_unregister(clock->ptp);
	cancel_delayed_work_sync(&clock->overflow_work);
	kfree(clock);
}
267