// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/bitfield.h>
#include <linux/jiffies.h>
#include <linux/limits.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timer.h>

#include "fbnic.h"
#include "fbnic_csr.h"
#include "fbnic_netdev.h"

/* FBNIC timing & PTP implementation
 * Datapath uses truncated 40b timestamps for scheduling and event reporting.
 * We need to promote those to full 64b, hence we periodically cache the top
 * 32 bits of the HW time counter. Since this makes our time reporting
 * non-atomic we leave the HW clock free running and adjust time offsets in
 * SW as needed.
 * Time offset is 64 bit - we need a seq counter for 32 bit machines.
 * Time offset and the cache of top bits are independent so we don't need
 * a coherent snapshot of both - READ_ONCE()/WRITE_ONCE() + writer side lock
 * are enough.
 */
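
/* Rough sketch of the 40b -> 64b promotion (the precise handling, including
 * wrap-around of the truncated value, lives in fbnic_ts40_to_ns()): combine
 * the cached time_high as bits 63:32 of device time with the 40b HW
 * timestamp, then add the SW time_offset. time_high is intentionally cached
 * slightly behind the HW counter (see fbnic_ptp_refresh_time()) so that
 * incoming timestamps are never older than the cached upper bits.
 */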

/* Period of refresh of top bits of timestamp, give ourselves an 8x margin.
 * This should translate to roughly once a minute: (1ULL << 40) / 16 ns is
 * 2^36 ns, i.e. ~68.7 sec.
 * The use of nsecs_to_jiffies() should be safe for a <=40b nsec value.
 */
#define FBNIC_TS_HIGH_REFRESH_JIF	nsecs_to_jiffies((1ULL << 40) / 16)

static struct fbnic_dev *fbnic_from_ptp_info(struct ptp_clock_info *ptp)
{
	return container_of(ptp, struct fbnic_dev, ptp_info);
}

/* This function is "slow" because we could try guessing which high part
 * is correct based on low instead of re-reading, and skip reading @hi
 * twice altogether if @lo is far enough from 0.
 */
static u64 __fbnic_time_get_slow(struct fbnic_dev *fbd)
{
	u32 hi, lo;

	lockdep_assert_held(&fbd->time_lock);

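	/* Re-read HI after LO so that a low-word rollover between the two
	 * reads is detected; retry until HI reads back unchanged.
	 */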
	do {
		hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
		lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
	} while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));

	return (u64)hi << 32 | lo;
}

static void __fbnic_time_set_addend(struct fbnic_dev *fbd, u64 addend)
{
	lockdep_assert_held(&fbd->time_lock);

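	/* The addend is fixed point: whole ns in the upper 32b (ADD_VAL_NS),
	 * fractional ns in the lower 32b (ADD_VAL_SUBNS).
	 */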
	fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_NS,
		   FIELD_PREP(FBNIC_PTP_ADD_VAL_NS_MASK, addend >> 32));
	fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_SUBNS, (u32)addend);
}

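/* Warn if the periodic refresh of the cached upper timestamp bits has
 * stalled, i.e. no refresh happened within 1.5x the expected period.
 */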
static void fbnic_ptp_fresh_check(struct fbnic_dev *fbd)
{
	if (time_is_after_jiffies(fbd->last_read +
				  FBNIC_TS_HIGH_REFRESH_JIF * 3 / 2))
		return;

	dev_warn(fbd->dev, "NIC timestamp refresh stall, delayed by %lu sec\n",
		 (jiffies - fbd->last_read - FBNIC_TS_HIGH_REFRESH_JIF) / HZ);
}

static void fbnic_ptp_refresh_time(struct fbnic_dev *fbd, struct fbnic_net *fbn)
{
	unsigned long flags;
	u32 hi;

	spin_lock_irqsave(&fbd->time_lock, flags);
	hi = fbnic_rd32(fbn->fbd, FBNIC_PTP_CTR_VAL_HI);
	if (!fbnic_present(fbd))
		goto out; /* Don't bother handling, reset is pending */
	/* Let's keep high cached value a bit lower to avoid race with
	 * incoming timestamps. The logic in fbnic_ts40_to_ns() will
	 * take care of overflow in this case. It will make cached time
	 * ~1 minute lower (16 << 32 ns, ~68.7 sec) and incoming
	 * timestamps will always be later than cached time.
	 */
	WRITE_ONCE(fbn->time_high, hi - 16);
	fbd->last_read = jiffies;
 out:
	spin_unlock_irqrestore(&fbd->time_lock, flags);
}

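/* Periodic PTP aux worker: sanity check the refresh cadence and re-cache the
 * upper 32 bits of the HW counter. The return value is the delay in jiffies
 * until the next invocation.
 */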
static long fbnic_ptp_do_aux_work(struct ptp_clock_info *ptp)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;

	fbn = netdev_priv(fbd->netdev);

	fbnic_ptp_fresh_check(fbd);
	fbnic_ptp_refresh_time(fbd, fbn);

	return FBNIC_TS_HIGH_REFRESH_JIF;
}

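/* Frequency adjustment: scale the nominal d_clock period by scaled_ppm with
 * adjust_by_scaled_ppm() and program the result as the new counter addend.
 */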
static int fbnic_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	u64 addend, dclk_period;
	unsigned long flags;

	/* d_clock is 600 MHz; which in Q16.32 fixed point ns is: */
	dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ;
	addend = adjust_by_scaled_ppm(dclk_period, scaled_ppm);

	spin_lock_irqsave(&fbd->time_lock, flags);
	__fbnic_time_set_addend(fbd, addend);
	fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_ADDEND_SET);

	/* Flush, make sure FBNIC_PTP_ADD_VAL_* is stable for at least 4 clks */
	fbnic_rd32(fbd, FBNIC_PTP_SPARE);
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	return fbnic_present(fbd) ? 0 : -EIO;
}

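/* Clock stepping is done purely in SW: the HW counter keeps free running and
 * only the 64b time_offset is shifted by delta, inside the u64_stats write
 * section so 32b readers never observe a torn offset.
 */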
static int fbnic_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;
	unsigned long flags;

	fbn = netdev_priv(fbd->netdev);

	spin_lock_irqsave(&fbd->time_lock, flags);
	u64_stats_update_begin(&fbn->time_seq);
	WRITE_ONCE(fbn->time_offset, READ_ONCE(fbn->time_offset) + delta);
	u64_stats_update_end(&fbn->time_seq);
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	return 0;
}

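/* Report current time: read the full 64b HW counter and add the SW offset.
 * System timestamps are captured right around the LO register read so the
 * PTP core can correlate device and system time as closely as possible.
 */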
static int
fbnic_ptp_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts,
		     struct ptp_system_timestamp *sts)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;
	unsigned long flags;
	u64 time_ns;
	u32 hi, lo;

	fbn = netdev_priv(fbd->netdev);

	spin_lock_irqsave(&fbd->time_lock, flags);

	do {
		hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
		ptp_read_system_prets(sts);
		lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
		ptp_read_system_postts(sts);
		/* Similarly to comment above __fbnic_time_get_slow()
		 * - this can be optimized if needed.
		 */
	} while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));

	time_ns = ((u64)hi << 32 | lo) + fbn->time_offset;
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	if (!fbnic_present(fbd))
		return -EIO;

	*ts = ns_to_timespec64(time_ns);

	return 0;
}

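/* Setting the clock also leaves the HW counter untouched; the SW offset is
 * simply recomputed as requested host time minus current device time.
 */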
static int
fbnic_ptp_settime64(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;
	unsigned long flags;
	u64 dev_ns, host_ns;
	int ret;

	fbn = netdev_priv(fbd->netdev);

	host_ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&fbd->time_lock, flags);

	dev_ns = __fbnic_time_get_slow(fbd);

	if (fbnic_present(fbd)) {
		u64_stats_update_begin(&fbn->time_seq);
		WRITE_ONCE(fbn->time_offset, host_ns - dev_ns);
		u64_stats_update_end(&fbn->time_seq);
		ret = 0;
	} else {
		ret = -EIO;
	}
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	return ret;
}

static const struct ptp_clock_info fbnic_ptp_info = {
	.owner			= THIS_MODULE,
	/* 1,000,000,000 - 1 PPB to ensure increment is positive
	 * after max negative adjustment.
	 */
	.max_adj		= 999999999,
	.do_aux_work		= fbnic_ptp_do_aux_work,
	.adjfine		= fbnic_ptp_adjfine,
	.adjtime		= fbnic_ptp_adjtime,
	.gettimex64		= fbnic_ptp_gettimex64,
	.settime64		= fbnic_ptp_settime64,
};

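/* Bring the HW clock into a known state: enable the counter, program the
 * nominal addend, load a zero initial value and clear the SW offset and
 * cached upper bits.
 */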
static void fbnic_ptp_reset(struct fbnic_dev *fbd)
{
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);
	u64 dclk_period;

	fbnic_wr32(fbd, FBNIC_PTP_CTRL,
		   FBNIC_PTP_CTRL_EN |
		   FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1));

	/* d_clock is 600 MHz; which in Q16.32 fixed point ns is: */
	dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ;

	__fbnic_time_set_addend(fbd, dclk_period);

	fbnic_wr32(fbd, FBNIC_PTP_INIT_HI, 0);
	fbnic_wr32(fbd, FBNIC_PTP_INIT_LO, 0);

	fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_INIT);

	fbnic_wr32(fbd, FBNIC_PTP_CTRL,
		   FBNIC_PTP_CTRL_EN |
		   FBNIC_PTP_CTRL_TQS_OUT_EN |
		   FIELD_PREP(FBNIC_PTP_CTRL_MAC_OUT_IVAL, 3) |
		   FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1));

	fbnic_rd32(fbd, FBNIC_PTP_SPARE);

	fbn->time_offset = 0;
	fbn->time_high = 0;
}

void fbnic_time_init(struct fbnic_net *fbn)
{
	/* This is not really a statistic, but the locking primitive fits
	 * our use case perfectly, we need an atomic 8 byte READ_ONCE() /
	 * WRITE_ONCE() behavior.
	 */
	u64_stats_init(&fbn->time_seq);
}

int fbnic_time_start(struct fbnic_net *fbn)
{
	fbnic_ptp_refresh_time(fbn->fbd, fbn);
	/* Assume that fbnic_ptp_do_aux_work() will never be called if not
	 * scheduled here
	 */
	return ptp_schedule_worker(fbn->fbd->ptp, FBNIC_TS_HIGH_REFRESH_JIF);
}

void fbnic_time_stop(struct fbnic_net *fbn)
{
	ptp_cancel_worker_sync(fbn->fbd->ptp);
	fbnic_ptp_fresh_check(fbn->fbd);
}

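/* One-time PTP setup: initialize the time lock, reset the HW clock and
 * register the PTP clock device with the core.
 */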
int fbnic_ptp_setup(struct fbnic_dev *fbd)
{
	struct device *dev = fbd->dev;
	unsigned long flags;

	spin_lock_init(&fbd->time_lock);

	spin_lock_irqsave(&fbd->time_lock, flags); /* Appease lockdep */
	fbnic_ptp_reset(fbd);
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	memcpy(&fbd->ptp_info, &fbnic_ptp_info, sizeof(fbnic_ptp_info));

	fbd->ptp = ptp_clock_register(&fbd->ptp_info, dev);
	if (IS_ERR(fbd->ptp))
		dev_err(dev, "Failed to register PTP: %pe\n", fbd->ptp);

	return PTR_ERR_OR_ZERO(fbd->ptp);
}

void fbnic_ptp_destroy(struct fbnic_dev *fbd)
{
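	/* Nothing to unregister if no PTP clock was ever created (e.g. the
	 * kernel's PTP clock support is not available).
	 */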
	if (!fbd->ptp)
		return;
	ptp_clock_unregister(fbd->ptp);
}
304