xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c (revision 4201c9260a8d3c4ef238e51692a7e9b4e1e29efe)
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */

#include <linux/ptp_clock_kernel.h>
#include <linux/clocksource.h>
#include <linux/timecounter.h>
#include <linux/spinlock.h>
#include <linux/device.h>

#include "spectrum_ptp.h"
#include "core.h"

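/* The nominal counter frequency of 156257 kHz corresponds to a cycle time of
 * roughly 6.4 ns (10^6 / 156257 ~= 6.4). With a shift of 29,
 * clocksource_khz2mult() yields a multiplier of about 3.44e9, which still
 * fits the 32-bit mult field, and the free-running counter is used at its
 * full 64-bit width.
 */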
#define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT	29
#define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ		156257 /* 6.4nSec */
#define MLXSW_SP1_PTP_CLOCK_MASK		64

struct mlxsw_sp_ptp_clock {
	struct mlxsw_core *core;
	spinlock_t lock; /* protect this structure */
	struct cyclecounter cycles;
	struct timecounter tc;
	u32 nominal_c_mult;
	struct ptp_clock *ptp;
	struct ptp_clock_info ptp_info;
	unsigned long overflow_period;
	struct delayed_work overflow_work;
};

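/* Read the 64-bit free-running counter as two 32-bit halves. If the high
 * word changed between its two reads, the low word wrapped in between, so
 * the low word is read again; the system timestamps always bracket the low
 * word read that is actually returned.
 */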
static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
				    struct ptp_system_timestamp *sts)
{
	struct mlxsw_core *mlxsw_core = clock->core;
	u32 frc_h1, frc_h2, frc_l;

	frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
	ptp_read_system_prets(sts);
	frc_l = mlxsw_core_read_frc_l(mlxsw_core);
	ptp_read_system_postts(sts);
	frc_h2 = mlxsw_core_read_frc_h(mlxsw_core);

	if (frc_h1 != frc_h2) {
		/* wrap around */
		ptp_read_system_prets(sts);
		frc_l = mlxsw_core_read_frc_l(mlxsw_core);
		ptp_read_system_postts(sts);
	}

	return (u64) frc_l | (u64) frc_h2 << 32;
}

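/* cyclecounter read callback: no system timestamp is requested and the raw
 * counter value is masked to the configured counter width.
 */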
static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(cc, struct mlxsw_sp_ptp_clock, cycles);

	return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
}

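/* Program a frequency adjustment, in ppb, into the hardware clock through
 * the MTUTC register.
 */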
static int
mlxsw_sp1_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
{
	struct mlxsw_core *mlxsw_core = clock->core;
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];

	mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
			     freq_adj, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}

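/* Convert nanoseconds to counter cycles; the inverse of the timecounter's
 * cycle-to-nanosecond conversion: cycles = (nsec << shift) / mult.
 */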
static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
{
	u64 cycles = (u64) nsec;

	cycles <<= tc->cc->shift;
	cycles = div_u64(cycles, tc->cc->mult);

	return cycles;
}

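/* Arm the hardware to load a new time at the next whole second: the cycle
 * count of that second boundary is written through MTPPS, and MTUTC is set
 * up to load next_sec when it is reached.
 */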
static int
mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
{
	struct mlxsw_core *mlxsw_core = clock->core;
	char mtutc_pl[MLXSW_REG_MTUTC_LEN];
	char mtpps_pl[MLXSW_REG_MTPPS_LEN];
	u64 next_sec_in_nsec, cycles;
	u32 next_sec;
	int err;

	next_sec = div_u64(nsec, NSEC_PER_SEC) + 1;
	next_sec_in_nsec = next_sec * NSEC_PER_SEC;

	spin_lock(&clock->lock);
	cycles = mlxsw_sp1_ptp_ns2cycles(&clock->tc, next_sec_in_nsec);
	spin_unlock(&clock->lock);

	mlxsw_reg_mtpps_vpin_pack(mtpps_pl, cycles);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpps), mtpps_pl);
	if (err)
		return err;

	mlxsw_reg_mtutc_pack(mtutc_pl,
			     MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
			     0, next_sec);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}

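/* Adjust the clock frequency: the scaled-ppm value from the PTP core is
 * converted to ppb, the software multiplier is offset from its nominal value
 * by nominal_c_mult * ppb / 1e9, and the same ppb adjustment is mirrored to
 * the hardware clock. The timecounter is read under the lock first so the
 * new multiplier only applies to cycles from this point on.
 */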
static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	int neg_adj = 0;
	u32 diff;
	u64 adj;
	s32 ppb;

	ppb = scaled_ppm_to_ppb(scaled_ppm);

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	adj = clock->nominal_c_mult;
	adj *= ppb;
	diff = div_u64(adj, NSEC_PER_SEC);

	spin_lock(&clock->lock);
	timecounter_read(&clock->tc);
	clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
				       clock->nominal_c_mult + diff;
	spin_unlock(&clock->lock);

	return mlxsw_sp1_ptp_phc_adjfreq(clock, neg_adj ? -ppb : ppb);
}

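/* Shift the software timecounter by delta nanoseconds and propagate the
 * resulting absolute time to the hardware clock.
 */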
static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	u64 nsec;

	spin_lock(&clock->lock);
	timecounter_adjtime(&clock->tc, delta);
	nsec = timecounter_read(&clock->tc);
	spin_unlock(&clock->lock);

	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
}

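/* Read the current time: sample the free-running counter, optionally
 * bracketed by the system timestamps requested by the PTP core, and convert
 * it to nanoseconds through the software timecounter.
 */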
static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	u64 cycles, nsec;

	spin_lock(&clock->lock);
	cycles = __mlxsw_sp1_ptp_read_frc(clock, sts);
	nsec = timecounter_cyc2time(&clock->tc, cycles);
	spin_unlock(&clock->lock);

	*ts = ns_to_timespec64(nsec);

	return 0;
}

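/* Set the clock to the given time: reinitialize the software timecounter
 * with it and program the hardware clock accordingly.
 */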
static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct mlxsw_sp_ptp_clock *clock =
		container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
	u64 nsec = timespec64_to_ns(ts);

	spin_lock(&clock->lock);
	timecounter_init(&clock->tc, &clock->cycles, nsec);
	nsec = timecounter_read(&clock->tc);
	spin_unlock(&clock->lock);

	return mlxsw_sp1_ptp_phc_settime(clock, nsec);
}

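/* Clock capabilities advertised to the PTP core; only time and frequency
 * operations are implemented, there is no ancillary pin support.
 */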
static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlxsw_sp_clock",
	.max_adj	= 100000000,
	.adjfine	= mlxsw_sp1_ptp_adjfine,
	.adjtime	= mlxsw_sp1_ptp_adjtime,
	.gettimex64	= mlxsw_sp1_ptp_gettimex,
	.settime64	= mlxsw_sp1_ptp_settime,
};

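/* Periodic work that reads the timecounter so the counter is sampled well
 * within the overflow window computed at init time; it re-arms itself every
 * overflow_period jiffies.
 */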
static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_ptp_clock *clock;

	clock = container_of(dwork, struct mlxsw_sp_ptp_clock, overflow_work);

	spin_lock(&clock->lock);
	timecounter_read(&clock->tc);
	spin_unlock(&clock->lock);
	mlxsw_core_schedule_dw(&clock->overflow_work, clock->overflow_period);
}

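/* Set up the cyclecounter/timecounter pair on top of the device's
 * free-running counter, start the overflow worker and register the clock
 * with the PTP core.
 */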
struct mlxsw_sp_ptp_clock *
mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
	u64 overflow_cycles, nsec, frac = 0;
	struct mlxsw_sp_ptp_clock *clock;
	int err;

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&clock->lock);
	clock->cycles.read = mlxsw_sp1_ptp_read_frc;
	clock->cycles.shift = MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT;
	clock->cycles.mult = clocksource_khz2mult(MLXSW_SP1_PTP_CLOCK_FREQ_KHZ,
						  clock->cycles.shift);
	clock->nominal_c_mult = clock->cycles.mult;
	clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
	clock->core = mlxsw_sp->core;

	timecounter_init(&clock->tc, &clock->cycles,
			 ktime_to_ns(ktime_get_real()));

	/* Calculate the period of the overflow watchdog so that the counter
	 * is checked at least twice per wraparound. The period is the
	 * minimum of the maximum HW cycle count (the clocksource mask) and
	 * the maximum number of cycles that can be multiplied by the clock
	 * multiplier without the result exceeding 64 bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));

	nsec = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, 0, &frac);
	clock->overflow_period = nsecs_to_jiffies(nsec);
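	/* With the nominal multiplier (~3.44e9) the mult-limited term above
	 * is the smaller one, roughly 2^63 / 3.44e9 ~= 2.7e9 cycles, which at
	 * ~6.4 ns per cycle puts the overflow period at roughly 17 seconds.
	 */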

	INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
	mlxsw_core_schedule_dw(&clock->overflow_work, 0);

	clock->ptp_info = mlxsw_sp1_ptp_clock_info;
	clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
	if (IS_ERR(clock->ptp)) {
		err = PTR_ERR(clock->ptp);
		dev_err(dev, "ptp_clock_register failed %d\n", err);
		goto err_ptp_clock_register;
	}

	return clock;

err_ptp_clock_register:
	cancel_delayed_work_sync(&clock->overflow_work);
	kfree(clock);
	return ERR_PTR(err);
}

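/* Tear down in reverse order: unregister the clock from the PTP core, stop
 * the overflow worker and free the structure.
 */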
void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
{
	ptp_clock_unregister(clock->ptp);
	cancel_delayed_work_sync(&clock->overflow_work);
	kfree(clock);
}