xref: /linux/drivers/net/ethernet/mellanox/mlx4/en_clock.c (revision 34dc1baba215b826e454b8d19e4f24adbeb7d00d)
/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/mlx4/device.h>
#include <linux/clocksource.h>

#include "mlx4_en.h"

/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
 */
static u64 mlx4_en_read_clock(const struct cyclecounter *tc)
{
	struct mlx4_en_dev *mdev =
		container_of(tc, struct mlx4_en_dev, cycles);
	struct mlx4_dev *dev = mdev->dev;

	return mlx4_read_clock(dev) & tc->mask;
}

u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
{
	u64 hi, lo;
	struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;

	lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo);
	hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16;

	return hi | lo;
}

u64 mlx4_en_get_hwtstamp(struct mlx4_en_dev *mdev, u64 timestamp)
{
	unsigned int seq;
	u64 nsec;

	do {
		seq = read_seqbegin(&mdev->clock_lock);
		nsec = timecounter_cyc2time(&mdev->clock, timestamp);
	} while (read_seqretry(&mdev->clock_lock, seq));

	return ns_to_ktime(nsec);
}

void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
			    u64 timestamp)
{
	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
	hwts->hwtstamp = mlx4_en_get_hwtstamp(mdev, timestamp);
}
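
/*
 * Usage sketch (assumption; it mirrors how an RX completion path would be
 * expected to consume the helpers above, and example_rx_hwtstamp is a
 * made-up name): pull the raw 48-bit counter value out of the CQE and
 * attach the converted hardware timestamp to the skb.
 */
#if 0
static void example_rx_hwtstamp(struct mlx4_en_dev *mdev,
				struct mlx4_cqe *cqe, struct sk_buff *skb)
{
	u64 timestamp = mlx4_en_get_cqe_ts(cqe);

	mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb), timestamp);
}
#endif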

/**
 * mlx4_en_remove_timestamp - disable PTP device
 * @mdev: board private structure
 *
 * Stop the PTP support.
 **/
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
{
	if (mdev->ptp_clock) {
		ptp_clock_unregister(mdev->ptp_clock);
		mdev->ptp_clock = NULL;
		mlx4_info(mdev, "removed PHC\n");
	}
}

#define MLX4_EN_WRAP_AROUND_SEC	10UL
/* By scheduling the overflow check every 5 seconds, we have a reasonably
 * good chance we won't miss a wrap around.
 * TODO: Use a timer instead of a work queue to increase the guarantee.
 */
#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)

void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
					      MLX4_EN_OVERFLOW_PERIOD);
	unsigned long flags;

	if (timeout) {
		write_seqlock_irqsave(&mdev->clock_lock, flags);
		timecounter_read(&mdev->clock);
		write_sequnlock_irqrestore(&mdev->clock_lock, flags);
		mdev->last_overflow_check = jiffies;
	}
}
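
/*
 * Scheduling sketch (assumption; in the driver this is driven from periodic
 * service work, and example_service_task is a made-up name): any context
 * that runs at least once per MLX4_EN_OVERFLOW_PERIOD is enough, because the
 * function rate-limits itself on mdev->last_overflow_check.
 */
#if 0
static void example_service_task(struct mlx4_en_dev *mdev)
{
	mlx4_en_ptp_overflow_check(mdev);
	/* ... other periodic housekeeping ... */
}
#endif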

/**
 * mlx4_en_phc_adjfine - adjust the frequency of the hardware clock
 * @ptp: ptp clock structure
 * @scaled_ppm: Desired frequency change in scaled parts per million
 *
 * Adjust the frequency of the PHC cycle counter by the indicated scaled_ppm
 * from the base frequency.
 *
 * Scaled parts per million is ppm with a 16-bit binary fractional field.
 **/
static int mlx4_en_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	u32 mult;
	unsigned long flags;
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);

	mult = (u32)adjust_by_scaled_ppm(mdev->nominal_c_mult, scaled_ppm);

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_read(&mdev->clock);
	mdev->cycles.mult = mult;
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}
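
/*
 * Worked example (illustrative only): the 16-bit binary fraction means
 * scaled_ppm == 65536 requests exactly +1 ppm, so the adjusted multiplier is
 * nominal_c_mult + nominal_c_mult / 1,000,000 (and correspondingly smaller
 * for negative scaled_ppm).  adjust_by_scaled_ppm() does this scaling for
 * us; the sketch below (a made-up helper, not part of the driver) shows the
 * equivalent arithmetic for modest scaled_ppm values.
 */
#if 0
static u32 example_scaled_ppm_to_mult(u32 nominal_mult, long scaled_ppm)
{
	s64 adj = (s64)nominal_mult * scaled_ppm;

	/* divide by 10^6 for the ppm and by 2^16 for the fractional field */
	adj = div_s64(adj, 1000000);
	adj >>= 16;

	return nominal_mult + adj;
}
#endif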

/**
 * mlx4_en_phc_adjtime - Shift the time of the hardware clock
 * @ptp: ptp clock structure
 * @delta: Desired change in nanoseconds
 *
 * Adjust the stored time of the timecounter by @delta nanoseconds.
 **/
static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_adjtime(&mdev->clock, delta);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}

/**
 * mlx4_en_phc_gettime - Reads the current time from the hardware clock
 * @ptp: ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * Read the timecounter and return the current time, converted from
 * nanoseconds into a struct timespec64.
 **/
static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp,
			       struct timespec64 *ts)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;
	u64 ns;

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	ns = timecounter_read(&mdev->clock);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * mlx4_en_phc_settime - Set the current time on the hardware clock
 * @ptp: ptp clock structure
 * @ts: timespec64 containing the new time for the cycle counter
 *
 * Reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 **/
static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
			       const struct timespec64 *ts)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	/* reset the timecounter */
	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles, ns);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}
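
/*
 * Usage note (illustrative): the gettime64/settime64 callbacks above are
 * reached from userspace through the character device /dev/ptp<N> that
 * ptp_clock_register() creates; tools such as testptp (kernel selftests)
 * or phc_ctl (linuxptp) drive them via clock_gettime()/clock_settime() on
 * the corresponding dynamic POSIX clock.
 */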

/**
 * mlx4_en_phc_enable - enable or disable an ancillary feature
 * @ptp: ptp clock structure
 * @request: Desired resource to enable or disable
 * @on: Caller passes one to enable or zero to disable
 *
 * Enable (or disable) ancillary features of the PHC subsystem.
 * Currently, no ancillary features are supported.
 **/
static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp,
			      struct ptp_clock_request __always_unused *request,
			      int __always_unused on)
{
	return -EOPNOTSUPP;
}

static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.max_adj	= 100000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfine	= mlx4_en_phc_adjfine,
	.adjtime	= mlx4_en_phc_adjtime,
	.gettime64	= mlx4_en_phc_gettime,
	.settime64	= mlx4_en_phc_settime,
	.enable		= mlx4_en_phc_enable,
};

/* Calculate the max shift that still allows a full MLX4_EN_WRAP_AROUND_SEC
 * worth of cycle-counter values to be converted without overflowing the
 * 64-bit cycles * mult product.
 */
static u32 freq_to_shift(u16 freq)
{
	u32 freq_khz = freq * 1000;
	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
	u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
	/* calculate max possible multiplier in order to fit in 64bit */
	u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);

	/* This comes from the reverse of clocksource_khz2mult */
	return ilog2(div_u64(max_mul * freq_khz, 1000000));
}
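
/*
 * Worked example (hypothetical 427 MHz core clock, i.e. freq = 427):
 *   freq_khz               = 427,000
 *   max_val_cycles         = 427,000 * 1000 * 10        = 4,270,000,000
 *   max_val_cycles_rounded = 1 << fls64(4,269,999,999)  = 2^32
 *   max_mul                = ULLONG_MAX / 2^32          = 4,294,967,295
 *   shift                  = ilog2(max_mul * 427,000 / 1,000,000) = 30
 * clocksource_khz2mult(427,000, 30) then yields a mult of roughly 2.5e9,
 * which stays below max_mul, so cycles * mult cannot overflow 64 bits for
 * any delta the overflow check lets accumulate.
 */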

void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;

	/* mlx4_en_init_timestamp is called for each netdev.
	 * mdev->ptp_clock is common to all ports; skip the initialization if
	 * it was already done for another port.
	 */
	if (mdev->ptp_clock)
		return;

	seqlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles,
			 ktime_to_ns(ktime_get_real()));
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	/* Configure the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else if (mdev->ptp_clock) {
		mlx4_info(mdev, "registered PHC clock\n");
	}
}
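
/*
 * Follow-up sketch (assumption; example_get_phc_index is a made-up helper
 * mirroring what an ethtool get_ts_info handler would do): once registration
 * succeeds, a port can report the clock index that userspace sees via
 * "ethtool -T" and as /dev/ptp<N>.
 */
#if 0
static int example_get_phc_index(struct mlx4_en_dev *mdev)
{
	return mdev->ptp_clock ? ptp_clock_index(mdev->ptp_clock) : -1;
}
#endif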
302