/* drivers/net/ethernet/mellanox/mlx4/en_clock.c (revision 26fbb4c8c7c3ee9a4c3b4de555a8587b5a19154e) */
/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/mlx4/device.h>
#include <linux/clocksource.h>

#include "mlx4_en.h"

/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
 */
static u64 mlx4_en_read_clock(const struct cyclecounter *tc)
{
	struct mlx4_en_dev *mdev =
		container_of(tc, struct mlx4_en_dev, cycles);
	struct mlx4_dev *dev = mdev->dev;

	return mlx4_read_clock(dev) & tc->mask;
}

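/* mlx4_en_get_cqe_ts - assemble the 48-bit hardware timestamp from a CQE.
 *
 * The hardware reports the timestamp as a 32-bit high word plus a 16-bit low
 * word; the "+ !lo" appears to compensate for a carry into the high word when
 * the low 16 bits read back as zero.
 */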
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
{
	u64 hi, lo;
	struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;

	lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo);
	hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16;

	return hi | lo;
}

void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
			    u64 timestamp)
{
	unsigned int seq;
	u64 nsec;

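	/* Read the timecounter under the seqlock so that a concurrent update
	 * (overflow check, adjfreq, adjtime or settime) cannot be observed
	 * half-way through.
	 */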
	do {
		seq = read_seqbegin(&mdev->clock_lock);
		nsec = timecounter_cyc2time(&mdev->clock, timestamp);
	} while (read_seqretry(&mdev->clock_lock, seq));

	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
	hwts->hwtstamp = ns_to_ktime(nsec);
}

/**
 * mlx4_en_remove_timestamp - disable PTP device
 * @mdev: board private structure
 *
 * Stop the PTP support.
 **/
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
{
	if (mdev->ptp_clock) {
		ptp_clock_unregister(mdev->ptp_clock);
		mdev->ptp_clock = NULL;
		mlx4_info(mdev, "removed PHC\n");
	}
}

#define MLX4_EN_WRAP_AROUND_SEC	10UL
/* By scheduling the overflow check every 5 seconds, we have a reasonably
 * good chance we won't miss a wrap around.
 * TODO: Use a timer instead of a work queue to increase the guarantee.
 */
#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)

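/* Fold the elapsed cycles into the timecounter before the delta since the
 * last reading grows past what freq_to_shift() sized the multiplier for
 * (MLX4_EN_WRAP_AROUND_SEC); checking twice per period leaves a safety margin.
 */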
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
					      MLX4_EN_OVERFLOW_PERIOD);
	unsigned long flags;

	if (timeout) {
		write_seqlock_irqsave(&mdev->clock_lock, flags);
		timecounter_read(&mdev->clock);
		write_sequnlock_irqrestore(&mdev->clock_lock, flags);
		mdev->last_overflow_check = jiffies;
	}
}

/**
 * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock
 * @ptp: ptp clock structure
 * @delta: Desired frequency change in parts per billion
 *
 * Adjust the frequency of the PHC cycle counter by the indicated delta from
 * the base frequency.
 **/
static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	u64 adj;
	u32 diff, mult;
	int neg_adj = 0;
	unsigned long flags;
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);

	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}
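	/* Scale the nominal multiplier by |delta| parts per billion:
	 * diff = nominal_c_mult * |delta| / 10^9, then speed the clock up or
	 * slow it down by adding or subtracting diff from the nominal value.
	 */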
	mult = mdev->nominal_c_mult;
	adj = mult;
	adj *= delta;
	diff = div_u64(adj, 1000000000ULL);

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_read(&mdev->clock);
	mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}

/**
 * mlx4_en_phc_adjtime - Shift the time of the hardware clock
 * @ptp: ptp clock structure
 * @delta: Desired change in nanoseconds
 *
 * Adjust the timer by resetting the timecounter structure.
 **/
static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_adjtime(&mdev->clock, delta);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}

/**
 * mlx4_en_phc_gettime - Reads the current time from the hardware clock
 * @ptp: ptp clock structure
 * @ts: timespec structure to hold the current time value
 *
 * Read the timecounter and return the correct value in ns after converting
 * it into a struct timespec.
 **/
static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp,
			       struct timespec64 *ts)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	unsigned long flags;
	u64 ns;

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	ns = timecounter_read(&mdev->clock);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * mlx4_en_phc_settime - Set the current time on the hardware clock
 * @ptp: ptp clock structure
 * @ts: timespec containing the new time for the cycle counter
 *
 * Reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 **/
static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
			       const struct timespec64 *ts)
{
	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
						ptp_clock_info);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	/* reset the timecounter */
	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles, ns);
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	return 0;
}

/**
 * mlx4_en_phc_enable - enable or disable an ancillary feature
 * @ptp: ptp clock structure
 * @request: Desired resource to enable or disable
 * @on: Caller passes one to enable or zero to disable
 *
 * Enable (or disable) ancillary features of the PHC subsystem.
 * Currently, no ancillary features are supported.
 **/
static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp,
			      struct ptp_clock_request __always_unused *request,
			      int __always_unused on)
{
	return -EOPNOTSUPP;
}

static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.max_adj	= 100000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= mlx4_en_phc_adjfreq,
	.adjtime	= mlx4_en_phc_adjtime,
	.gettime64	= mlx4_en_phc_gettime,
	.settime64	= mlx4_en_phc_settime,
	.enable		= mlx4_en_phc_enable,
};

/* Calculate the maximum shift (i.e. the finest clock resolution) that still
 * allows MLX4_EN_WRAP_AROUND_SEC seconds worth of cycles to be converted to
 * nanoseconds without overflowing 64 bits.
 */
static u32 freq_to_shift(u16 freq)
{
	u32 freq_khz = freq * 1000;
	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
	u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
	/* calculate max possible multiplier in order to fit in 64bit */
	u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);

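	/* clocksource_khz2mult() gives mult ~= (10^6 << shift) / freq_khz, so
	 * the largest shift whose multiplier still fits under max_mul is
	 * ilog2(max_mul * freq_khz / 10^6).
	 */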
	/* This comes from the reverse of clocksource_khz2mult */
	return ilog2(div_u64(max_mul * freq_khz, 1000000));
}

void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	unsigned long flags;

	/* mlx4_en_init_timestamp is called for each netdev.
	 * mdev->ptp_clock is common to all ports; skip initialization if it
	 * was already done for another port.
	 */
	if (mdev->ptp_clock)
		return;

	seqlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
	mdev->cycles.read = mlx4_en_read_clock;
	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
	mdev->cycles.mult =
		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
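	/* Remember the nominal multiplier; adjfreq scales relative to it. */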
	mdev->nominal_c_mult = mdev->cycles.mult;

	write_seqlock_irqsave(&mdev->clock_lock, flags);
	timecounter_init(&mdev->clock, &mdev->cycles,
			 ktime_to_ns(ktime_get_real()));
	write_sequnlock_irqrestore(&mdev->clock_lock, flags);

	/* Configure the PHC */
	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");

	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
					     &mdev->pdev->dev);
	if (IS_ERR(mdev->ptp_clock)) {
		mdev->ptp_clock = NULL;
		mlx4_err(mdev, "ptp_clock_register failed\n");
	} else if (mdev->ptp_clock) {
		mlx4_info(mdev, "registered PHC clock\n");
	}
}