xref: /linux/drivers/net/ethernet/meta/fbnic/fbnic_time.c (revision 25768de50b1f2dbb6ea44bd5148a87fe2c9c3688)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/bitfield.h>
#include <linux/jiffies.h>
#include <linux/limits.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timer.h>

#include "fbnic.h"
#include "fbnic_csr.h"
#include "fbnic_netdev.h"

/* FBNIC timing & PTP implementation
 * The datapath uses truncated 40b timestamps for scheduling and event
 * reporting. We need to promote those to full 64b, hence we periodically
 * cache the top 32 bits of the HW time counter. Since this makes our time
 * reporting non-atomic, we leave the HW clock free running and adjust time
 * offsets in SW as needed.
 * The time offset is 64 bits - we need a seq counter for 32-bit machines.
 * The time offset and the cache of top bits are independent, so we don't
 * need a coherent snapshot of both - READ_ONCE()/WRITE_ONCE() plus a
 * writer-side lock are enough.
 */

/* Refresh period for the top bits of the timestamp; give ourselves an 8x
 * margin. This should translate to once a minute.
 * The use of nsecs_to_jiffies() should be safe for a <=40b nsec value.
 */
#define FBNIC_TS_HIGH_REFRESH_JIF	nsecs_to_jiffies((1ULL << 40) / 16)
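/* Spelling the arithmetic out: 2^40 ns is ~1099.5 s, so (2^40 / 16) ns is
 * ~68.7 s - i.e. the refresh worker does indeed run roughly once a minute.
 */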

static struct fbnic_dev *fbnic_from_ptp_info(struct ptp_clock_info *ptp)
{
	return container_of(ptp, struct fbnic_dev, ptp_info);
}

/* This function is "slow": it could be sped up by guessing which high part
 * is correct based on @lo instead of re-reading, and by skipping the second
 * read of @hi altogether when @lo is far enough from 0 (see the sketch
 * below __fbnic_time_get_slow()).
 */
static u64 __fbnic_time_get_slow(struct fbnic_dev *fbd)
{
	u32 hi, lo;

	lockdep_assert_held(&fbd->time_lock);

	do {
		hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
		lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
	} while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));

	return (u64)hi << 32 | lo;
}

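/* Purely illustrative sketch of the optimization hinted at above - not used
 * anywhere in this driver, and the threshold below is an arbitrary
 * assumption. The idea: @hi can only have changed between the two reads if
 * @lo wrapped, so a second read of @hi is needed only when @lo is small.
 */
static u64 __maybe_unused __fbnic_time_get_fast_sketch(struct fbnic_dev *fbd)
{
	u32 hi, lo;

	lockdep_assert_held(&fbd->time_lock);

	hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
	lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);

	/* If @lo is close to 0 the counter may have just wrapped, in which
	 * case the first read of @hi is stale; a fresh read is guaranteed
	 * to match @lo since another wrap would take ~4.3 s.
	 */
	if (lo < (1U << 28))
		hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);

	return (u64)hi << 32 | lo;
}
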
static void __fbnic_time_set_addend(struct fbnic_dev *fbd, u64 addend)
{
	lockdep_assert_held(&fbd->time_lock);

	fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_NS,
		   FIELD_PREP(FBNIC_PTP_ADD_VAL_NS_MASK, addend >> 32));
	fbnic_wr32(fbd, FBNIC_PTP_ADD_VAL_SUBNS, (u32)addend);
}

static void fbnic_ptp_fresh_check(struct fbnic_dev *fbd)
{
	if (time_is_after_jiffies(fbd->last_read +
				  FBNIC_TS_HIGH_REFRESH_JIF * 3 / 2))
		return;

	dev_warn(fbd->dev, "NIC timestamp refresh stall, delayed by %lu sec\n",
		 (jiffies - fbd->last_read - FBNIC_TS_HIGH_REFRESH_JIF) / HZ);
}

static void fbnic_ptp_refresh_time(struct fbnic_dev *fbd, struct fbnic_net *fbn)
{
	unsigned long flags;
	u32 hi;

	spin_lock_irqsave(&fbd->time_lock, flags);
	hi = fbnic_rd32(fbn->fbd, FBNIC_PTP_CTR_VAL_HI);
	if (!fbnic_present(fbd))
		goto out; /* Don't bother handling, reset is pending */
	/* Keep the cached high value a bit lower to avoid racing with
	 * incoming timestamps. The logic in fbnic_ts40_to_ns() (sketched
	 * just below this function) takes care of the overflow in that
	 * case. This makes the cached time ~1 minute lower, so an incoming
	 * timestamp will always be later than the cached time.
	 */
	WRITE_ONCE(fbn->time_high, hi - 16);
	fbd->last_read = jiffies;
 out:
	spin_unlock_irqrestore(&fbd->time_lock, flags);
}

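/* Illustrative sketch of the consumer side described above. The driver's
 * real conversion helper (fbnic_ts40_to_ns() in fbnic_netdev.h) may differ
 * in detail; this only shows one plausible way to promote a 40b datapath
 * timestamp using the cached high bits, the seq-counted offset and the
 * "cache kept ~1 minute behind" convention.
 */
static u64 __maybe_unused fbnic_example_ts40_to_ns(struct fbnic_net *fbn,
						   u64 ts40)
{
	unsigned int seq;
	s64 offset;
	u32 high;
	u64 ns;

	/* time_offset is 64b, so pair READ_ONCE() with the seq counter to
	 * get a consistent value on 32-bit machines.
	 */
	do {
		seq = u64_stats_fetch_begin(&fbn->time_seq);
		offset = READ_ONCE(fbn->time_offset);
	} while (u64_stats_fetch_retry(&fbn->time_seq, seq));

	high = READ_ONCE(fbn->time_high);

	/* Bits 63..40 come from the cached counter, bits 39..0 from HW */
	ns = (u64)(high >> 8) << 40 | ts40;

	/* The cache is deliberately kept behind the HW counter, so if the
	 * 40b stamp looks older than the cache the counter must have
	 * wrapped since the last refresh - bump by 2^40.
	 */
	if ((u8)(ts40 >> 32) < (u8)high)
		ns += 1ULL << 40;

	return ns + offset;
}
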
static long fbnic_ptp_do_aux_work(struct ptp_clock_info *ptp)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;

	fbn = netdev_priv(fbd->netdev);

	fbnic_ptp_fresh_check(fbd);
	fbnic_ptp_refresh_time(fbd, fbn);

	return FBNIC_TS_HIGH_REFRESH_JIF;
}

static int fbnic_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	u64 addend, dclk_period;
	unsigned long flags;

	/* d_clock is 600 MHz, which in Q16.32 fixed point ns is: */
	dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ;
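	/* With the stated 600 MHz d_clock (i.e. assuming FBNIC_CLOCK_FREQ is
	 * 600000000) this works out to 10^9 * 2^32 / (6 * 10^8) ~= 7158278826,
	 * i.e. ~1.667 ns per d_clock tick in Q16.32.
	 */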
	addend = adjust_by_scaled_ppm(dclk_period, scaled_ppm);

	spin_lock_irqsave(&fbd->time_lock, flags);
	__fbnic_time_set_addend(fbd, addend);
	fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_ADDEND_SET);

	/* Flush, make sure FBNIC_PTP_ADD_VAL_* is stable for at least 4 clks */
	fbnic_rd32(fbd, FBNIC_PTP_SPARE);
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	return fbnic_present(fbd) ? 0 : -EIO;
}

static int fbnic_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;
	unsigned long flags;

	fbn = netdev_priv(fbd->netdev);

	spin_lock_irqsave(&fbd->time_lock, flags);
	u64_stats_update_begin(&fbn->time_seq);
	WRITE_ONCE(fbn->time_offset, READ_ONCE(fbn->time_offset) + delta);
	u64_stats_update_end(&fbn->time_seq);
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	return 0;
}

static int
fbnic_ptp_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts,
		     struct ptp_system_timestamp *sts)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;
	unsigned long flags;
	u64 time_ns;
	u32 hi, lo;

	fbn = netdev_priv(fbd->netdev);

	spin_lock_irqsave(&fbd->time_lock, flags);

	do {
		hi = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI);
		ptp_read_system_prets(sts);
		lo = fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_LO);
		ptp_read_system_postts(sts);
		/* As with __fbnic_time_get_slow() above, this loop can be
		 * optimized if needed.
		 */
	} while (hi != fbnic_rd32(fbd, FBNIC_PTP_CTR_VAL_HI));

	time_ns = ((u64)hi << 32 | lo) + fbn->time_offset;
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	if (!fbnic_present(fbd))
		return -EIO;

	*ts = ns_to_timespec64(time_ns);

	return 0;
}

static int
fbnic_ptp_settime64(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct fbnic_dev *fbd = fbnic_from_ptp_info(ptp);
	struct fbnic_net *fbn;
	unsigned long flags;
	u64 dev_ns, host_ns;
	int ret;

	fbn = netdev_priv(fbd->netdev);

	host_ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&fbd->time_lock, flags);

	dev_ns = __fbnic_time_get_slow(fbd);

	if (fbnic_present(fbd)) {
		u64_stats_update_begin(&fbn->time_seq);
		WRITE_ONCE(fbn->time_offset, host_ns - dev_ns);
		u64_stats_update_end(&fbn->time_seq);
		ret = 0;
	} else {
		ret = -EIO;
	}
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	return ret;
}

static const struct ptp_clock_info fbnic_ptp_info = {
	.owner			= THIS_MODULE,
	/* 1,000,000,000 - 1 ppb, to keep the increment positive even after
	 * the maximum negative adjustment.
	 */
	.max_adj		= 999999999,
	.do_aux_work		= fbnic_ptp_do_aux_work,
	.adjfine		= fbnic_ptp_adjfine,
	.adjtime		= fbnic_ptp_adjtime,
	.gettimex64		= fbnic_ptp_gettimex64,
	.settime64		= fbnic_ptp_settime64,
};

static void fbnic_ptp_reset(struct fbnic_dev *fbd)
{
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);
	u64 dclk_period;

	fbnic_wr32(fbd, FBNIC_PTP_CTRL,
		   FBNIC_PTP_CTRL_EN |
		   FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1));

	/* d_clock is 600 MHz, which in Q16.32 fixed point ns is: */
	dclk_period = (((u64)1000000000) << 32) / FBNIC_CLOCK_FREQ;

	__fbnic_time_set_addend(fbd, dclk_period);

	fbnic_wr32(fbd, FBNIC_PTP_INIT_HI, 0);
	fbnic_wr32(fbd, FBNIC_PTP_INIT_LO, 0);

	fbnic_wr32(fbd, FBNIC_PTP_ADJUST, FBNIC_PTP_ADJUST_INIT);

	fbnic_wr32(fbd, FBNIC_PTP_CTRL,
		   FBNIC_PTP_CTRL_EN |
		   FBNIC_PTP_CTRL_TQS_OUT_EN |
		   FIELD_PREP(FBNIC_PTP_CTRL_MAC_OUT_IVAL, 3) |
		   FIELD_PREP(FBNIC_PTP_CTRL_TICK_IVAL, 1));

	fbnic_rd32(fbd, FBNIC_PTP_SPARE);

	fbn->time_offset = 0;
	fbn->time_high = 0;
}

void fbnic_time_init(struct fbnic_net *fbn)
{
	/* This is not really a statistic, but the locking primitive fits
	 * our use case perfectly - we need atomic 8-byte READ_ONCE() /
	 * WRITE_ONCE() behavior.
	 */
	u64_stats_init(&fbn->time_seq);
}

int fbnic_time_start(struct fbnic_net *fbn)
{
	fbnic_ptp_refresh_time(fbn->fbd, fbn);
	/* Assume that fbnic_ptp_do_aux_work() will never be called if not
	 * scheduled here
	 */
	return ptp_schedule_worker(fbn->fbd->ptp, FBNIC_TS_HIGH_REFRESH_JIF);
}

void fbnic_time_stop(struct fbnic_net *fbn)
{
	ptp_cancel_worker_sync(fbn->fbd->ptp);
	fbnic_ptp_fresh_check(fbn->fbd);
}

int fbnic_ptp_setup(struct fbnic_dev *fbd)
{
	struct device *dev = fbd->dev;
	unsigned long flags;

	spin_lock_init(&fbd->time_lock);

	spin_lock_irqsave(&fbd->time_lock, flags); /* Appease lockdep */
	fbnic_ptp_reset(fbd);
	spin_unlock_irqrestore(&fbd->time_lock, flags);

	memcpy(&fbd->ptp_info, &fbnic_ptp_info, sizeof(fbnic_ptp_info));

	fbd->ptp = ptp_clock_register(&fbd->ptp_info, dev);
	if (IS_ERR(fbd->ptp))
		dev_err(dev, "Failed to register PTP: %pe\n", fbd->ptp);

	return PTR_ERR_OR_ZERO(fbd->ptp);
}

void fbnic_ptp_destroy(struct fbnic_dev *fbd)
{
	if (!fbd->ptp)
		return;
	ptp_clock_unregister(fbd->ptp);
}