/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"

/* 16-nanosecond time quanta to wait before making a drift adjustment */
#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT	0
/* Nanoseconds to add/subtract when making a drift adjustment */
#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT		28
/* Add/subtract the adjustment value when making a drift adjustment */
#define QED_DRIFT_CNTR_DIRECTION_SHIFT		31
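/* The period, adjustment value and direction are packed into a single
 * NIG_REG_TSGEN_DRIFT_CNTR_CONF word using the shifts above: period in
 * bits 0-27 (in units of 16 ns), adjustment value in bits 28-30 (0-7 ns),
 * direction in bit 31. Field widths are inferred from the clamping done
 * in qed_ptp_hw_adjfreq() below.
 */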
#define QED_TIMESTAMP_MASK			BIT(16)

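/* PTP is managed as a per-port resource; map this PF's port to the
 * corresponding MFW resource lock so that only one PF per port ends up
 * owning the PTP configuration.
 */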
static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
	switch (qed_device_get_port_id(p_hwfn->cdev)) {
	case 0:
		return QED_RESC_LOCK_PTP_PORT0;
	case 1:
		return QED_RESC_LOCK_PTP_PORT1;
	case 2:
		return QED_RESC_LOCK_PTP_PORT2;
	case 3:
		return QED_RESC_LOCK_PTP_PORT3;
	default:
		return QED_RESC_LOCK_RESC_INVALID;
	}
}

static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_lock_params params;
	enum qed_resc_lock resource;
	int rc;

	resource = qed_ptcdev_to_resc(p_hwfn);
	if (resource == QED_RESC_LOCK_RESC_INVALID)
		return -EINVAL;

	qed_mcp_resc_lock_default_init(&params, NULL, resource, true);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
	if (rc && rc != -EINVAL) {
		return rc;
	} else if (rc == -EINVAL) {
		/* MFW doesn't support resource locking, first PF on the port
		 * has lock ownership.
		 */
		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines)
			return 0;

		DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
		return -EBUSY;
	} else if (!rc && !params.b_granted) {
		DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
		return -EBUSY;
	}

	return rc;
}

static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_unlock_params params;
	enum qed_resc_lock resource;
	int rc;

	resource = qed_ptcdev_to_resc(p_hwfn);
	if (resource == QED_RESC_LOCK_RESC_INVALID)
		return -EINVAL;

	qed_mcp_resc_lock_default_init(NULL, &params, resource, true);

	rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
	if (rc == -EINVAL) {
		/* MFW doesn't support locking, first PF has lock ownership */
		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) {
			rc = 0;
		} else {
			DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
			return -EINVAL;
		}
	} else if (rc) {
		DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
	}

	return rc;
}

/* Read Rx timestamp */
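/* Bit 16 of the host-buffer SEQID register (QED_TIMESTAMP_MASK) is treated
 * as a "timestamp captured" indication; writing the bit back releases the
 * buffer so the next PTP packet can be latched. The same scheme is used for
 * the Tx buffer below.
 */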
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
		return -EINVAL;
	}

	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Reset timestamp register to allow new timestamp */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}

/* Read Tx timestamp */
static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_INFO(p_hwfn, "Invalid Tx timestamp, buf_seqid = %d\n", val);
		return -EINVAL;
	}

	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Reset timestamp register to allow new timestamp */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}

/* Read PTP Hardware Clock (PHC) cycle counter */
static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 temp = 0;

	temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
	*phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
	*phc_cycles <<= 32;
	*phc_cycles |= temp;

	return 0;
}

/* Filter PTP protocol packets that need to be timestamped */
static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
				  enum qed_ptp_filter_type rx_type,
				  enum qed_ptp_hwtstamp_tx_type tx_type)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 rule_mask, enable_cfg = 0x0;

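	/* enable_cfg is programmed into the Rx (and, unless Tx timestamping is
	 * off, Tx) PTP enable registers to select which detection blocks are
	 * active, while rule_mask masks off individual classification rules in
	 * the LLH PTP rule-mask registers. The exact bit assignments are
	 * hardware specific and are not decoded here.
	 */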
	switch (rx_type) {
	case QED_PTP_FILTER_NONE:
		enable_cfg = 0x0;
		rule_mask = 0x3FFF;
		break;
	case QED_PTP_FILTER_ALL:
		enable_cfg = 0x7;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V1_L4_EVENT:
		enable_cfg = 0x3;
		rule_mask = 0x3FFA;
		break;
	case QED_PTP_FILTER_V1_L4_GEN:
		enable_cfg = 0x3;
		rule_mask = 0x3FFE;
		break;
	case QED_PTP_FILTER_V2_L4_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3FAA;
		break;
	case QED_PTP_FILTER_V2_L4_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3FEE;
		break;
	case QED_PTP_FILTER_V2_L2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CFF;
		break;
	case QED_PTP_FILTER_V2_L2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EFF;
		break;
	case QED_PTP_FILTER_V2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EEE;
		break;
	default:
		DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
		return -EINVAL;
	}

	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);

	if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
	} else {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
	}

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}

/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
 * FW/HW accepts the adjustment value in terms of 3 parameters:
 *   Drift period - adjustment happens once in a certain number of nanoseconds.
 *   Drift value - time is adjusted by a certain value, for example by 5 ns.
 *   Drift direction - add or subtract the adjustment value.
 * The routine translates ppb into the adjustment triplet in an optimal manner.
 */
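/* As modeled by this routine, an adjustment of 'val' ns is applied once
 * every (16 * period + 8) ns, so the achieved rate is roughly
 * val * 10^9 / (16 * period + 8) ppb. The loop below searches val = 7..1
 * for the (val, period) pair that minimizes the relative error versus the
 * requested ppb.
 */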
static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
{
	s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 drift_ctr_cfg = 0, drift_state;
	int drift_dir = 1;

	if (ppb < 0) {
		ppb = -ppb;
		drift_dir = 0;
	}

	if (ppb > 1) {
		s64 best_dif = ppb, best_approx_dev = 1;

		/* Adjustment value is up to +/-7ns, find an optimal value in
		 * this range.
		 */
		for (val = 7; val > 0; val--) {
			period = div_s64(val * 1000000000, ppb);
			period -= 8;
			period >>= 4;
			if (period < 1)
				period = 1;
			if (period > 0xFFFFFFE)
				period = 0xFFFFFFE;

			/* Check both rounding ends for approximate error */
			approx_dev = period * 16 + 8;
			dif = ppb * approx_dev - val * 1000000000;
			dif2 = dif + 16 * ppb;

			if (dif < 0)
				dif = -dif;
			if (dif2 < 0)
				dif2 = -dif2;

			/* Determine which end gives better approximation */
			if (dif * (approx_dev + 16) > dif2 * approx_dev) {
				period++;
				approx_dev += 16;
				dif = dif2;
			}

			/* Track best approximation found so far */
			if (best_dif * approx_dev > dif * best_approx_dev) {
				best_dif = dif;
				best_val = val;
				best_period = period;
				best_approx_dev = approx_dev;
			}
		}
	} else if (ppb == 1) {
		/* This is a special case, as it's the only value which
		 * wouldn't fit in an s64 variable. In order to prevent casts,
		 * simply handle it separately.
		 */
		best_val = 4;
		best_period = 0xee6b27f;
	} else {
		best_val = 0;
		best_period = 0xFFFFFFF;
	}

	drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
			(((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
			(((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);

	drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
	if (drift_state & 1) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
		       drift_ctr_cfg);
	} else {
		DP_INFO(p_hwfn, "Drift counter is not reset\n");
		return -EINVAL;
	}

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	return 0;
}

static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
		return -EBUSY;
	}

	p_hwfn->p_ptp_ptt = p_ptt;

	rc = qed_ptp_res_lock(p_hwfn, p_ptt);
	if (rc) {
		DP_INFO(p_hwfn,
			"Couldn't acquire the resource lock, skip ptp enable for this PF\n");
		qed_ptt_release(p_hwfn, p_ptt);
		p_hwfn->p_ptp_ptt = NULL;
		return rc;
	}

	/* Reset PTP event detection rules - will be configured in the IOCTL */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);

	/* Pause free running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
	if (QED_IS_AH(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
	/* Resume free running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
	if (QED_IS_AH(p_hwfn->cdev)) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
		qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
	}

	/* Disable drift register */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	/* Reset possibly old timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}

static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;

	qed_ptp_res_unlock(p_hwfn, p_ptt);

	/* Reset PTP event detection rules */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	/* Disable the PTP feature */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);

	qed_ptt_release(p_hwfn, p_ptt);
	p_hwfn->p_ptp_ptt = NULL;

	return 0;
}

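/* These callbacks are exported through the qed Ethernet ops (see qed_l2.c)
 * and are used by the qede driver to implement its PTP hardware clock and
 * packet-timestamping support.
 */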
const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
	.cfg_filters = qed_ptp_hw_cfg_filters,
	.read_rx_ts = qed_ptp_hw_read_rx_ts,
	.read_tx_ts = qed_ptp_hw_read_tx_ts,
	.read_cc = qed_ptp_hw_read_cc,
	.adjfreq = qed_ptp_hw_adjfreq,
	.disable = qed_ptp_hw_disable,
	.enable = qed_ptp_hw_enable,
};
453