// SPDX-License-Identifier: GPL-2.0
/* Marvell PTP driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

#include "mbox.h"
#include "ptp.h"
#include "rvu.h"

#define DRV_NAME				"Marvell PTP Driver"

#define PCI_DEVID_OCTEONTX2_PTP			0xA00C
#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP		0xB100
#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP		0xB200
#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP		0xB300
#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP	0xB400
#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP		0xB500
#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP	0xB600
#define PCI_DEVID_OCTEONTX2_RST			0xA085
#define PCI_DEVID_CN10K_PTP			0xA09E
#define PCI_SUBSYS_DEVID_CN10K_A_PTP		0xB900
#define PCI_SUBSYS_DEVID_CNF10K_A_PTP		0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B_PTP		0xBC00

#define PCI_PTP_BAR_NO				0

#define PTP_CLOCK_CFG				0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN			BIT_ULL(0)
#define PTP_CLOCK_CFG_EXT_CLK_EN		BIT_ULL(1)
#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK		GENMASK_ULL(7, 2)
#define PTP_CLOCK_CFG_TSTMP_EDGE		BIT_ULL(9)
#define PTP_CLOCK_CFG_TSTMP_EN			BIT_ULL(8)
#define PTP_CLOCK_CFG_TSTMP_IN_MASK		GENMASK_ULL(15, 10)
#define PTP_CLOCK_CFG_ATOMIC_OP_MASK		GENMASK_ULL(28, 26)
#define PTP_CLOCK_CFG_PPS_EN			BIT_ULL(30)
#define PTP_CLOCK_CFG_PPS_INV			BIT_ULL(31)

#define PTP_PPS_HI_INCR				0xF60ULL
#define PTP_PPS_LO_INCR				0xF68ULL
#define PTP_PPS_THRESH_LO			0xF50ULL
#define PTP_PPS_THRESH_HI			0xF58ULL

#define PTP_CLOCK_LO				0xF08ULL
#define PTP_CLOCK_HI				0xF10ULL
#define PTP_CLOCK_COMP				0xF18ULL
#define PTP_TIMESTAMP				0xF20ULL
#define PTP_CLOCK_SEC				0xFD0ULL
#define PTP_SEC_ROLLOVER			0xFD8ULL
/* Atomic update related CSRs */
#define PTP_FRNS_TIMESTAMP			0xFE0ULL
#define PTP_NXT_ROLLOVER_SET			0xFE8ULL
#define PTP_CURR_ROLLOVER_SET			0xFF0ULL
#define PTP_NANO_TIMESTAMP			0xFF8ULL
#define PTP_SEC_TIMESTAMP			0x1000ULL

#define CYCLE_MULT				1000

#define is_rev_A0(ptp) (((ptp)->pdev->revision & 0x0F) == 0x0)
#define is_rev_A1(ptp) (((ptp)->pdev->revision & 0x0F) == 0x1)

/* PTP atomic update operation type */
enum atomic_opcode {
	ATOMIC_SET = 1,
	ATOMIC_INC = 3,
	ATOMIC_DEC = 4
};

static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];

static bool is_ptp_dev_cnf10ka(struct ptp *ptp)
{
	return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP;
}

static bool is_ptp_dev_cn10ka(struct ptp *ptp)
{
	return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP;
}

static bool cn10k_ptp_errata(struct ptp *ptp)
{
	if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
	    (is_rev_A0(ptp) || is_rev_A1(ptp)))
		return true;

	return false;
}

static bool is_tstmp_atomic_update_supported(struct rvu *rvu)
{
	struct ptp *ptp = rvu->ptp;

	if (is_rvu_otx2(rvu))
		return false;

	/* On older silicon variants of CN10K, the atomic update feature
	 * is not available.
	 */
	if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
	    (is_rev_A0(ptp) || is_rev_A1(ptp)))
		return false;

	return true;
}

static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
{
	struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer);
	ktime_t curr_ts = ktime_get();
	ktime_t delta_ns, period_ns;
	u64 ptp_clock_hi;

	/* calculate the elapsed time since the last restart */
	delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts));

	/* if the ptp clock value has crossed 0.5 seconds,
	 * it's too late to update the pps threshold value, so
	 * update the threshold after 1 second.
	 */
	ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
	if (ptp_clock_hi > 500000000) {
		period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi));
	} else {
		writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI);
		period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns));
	}

	hrtimer_forward_now(hrtimer, period_ns);
	ptp->last_ts = curr_ts;

	return HRTIMER_RESTART;
}

static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns)
{
	ktime_t period_ns;

	period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns));
	hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL);
	ptp->last_ts = ktime_get();
}

static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
{
	u64 sec, sec1, nsec;
	unsigned long flags;

	spin_lock_irqsave(&ptp->ptp_lock, flags);
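	/* Read seconds, then nanoseconds, then seconds again: if the second
	 * read of the seconds register differs from the first, the
	 * nanosecond counter rolled over in between and must be re-read.
	 */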
	sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
	nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
	sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
	/* check nsec rollover */
	if (sec1 > sec) {
		nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
		sec = sec1;
	}
	spin_unlock_irqrestore(&ptp->ptp_lock, flags);

	return sec * NSEC_PER_SEC + nsec;
}

static u64 read_ptp_tstmp_nsec(struct ptp *ptp)
{
	return readq(ptp->reg_base + PTP_CLOCK_HI);
}

static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq)
{
	u64 comp, adj = 0, cycles_per_sec, ns_drift = 0;
	u32 ptp_clock_nsec, cycle_time;
	int cycle;

	/* Errata:
	 * Issue #1: At the time of 1 sec rollover of the nano-second counter,
	 * the nano-second counter is set to 0. However, it should be set to
	 * (existing counter_value - 10^9).
	 *
	 * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF.
	 * It should roll over at 0x3B9A_CA00.
	 */

	/* calculate ptp_clock_comp value */
	comp = ((u64)1000000000ULL << 32) / ptp_clock_freq;
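	/* comp is a 32.32 fixed-point "nanoseconds per coprocessor cycle"
	 * value: for example, a 1 GHz clock yields comp = 2^32 (exactly
	 * 1 ns per cycle) and an 800 MHz clock yields 1.25 * 2^32.
	 */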
	/* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */
	cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq;
	/* cycles per sec */
	cycles_per_sec = ptp_clock_freq;

	/* check whether ptp nanosecond counter rolls over early */
	cycle = cycles_per_sec - 1;
	ptp_clock_nsec = (cycle * comp) >> 32;
	while (ptp_clock_nsec < NSEC_PER_SEC) {
		if (ptp_clock_nsec == 0x3B9AC9FF)
			goto calc_adj_comp;
		cycle++;
		ptp_clock_nsec = (cycle * comp) >> 32;
	}
	/* compute nanoseconds lost per second when nsec counter rolls over */
	ns_drift = ptp_clock_nsec - NSEC_PER_SEC;
	/* calculate ptp_clock_comp adjustment */
	if (ns_drift > 0) {
		adj = comp * ns_drift;
		adj = adj / 1000000000ULL;
	}
	/* speed up the ptp clock to account for nanoseconds lost */
	comp += adj;
	return comp;

calc_adj_comp:
	/* slow down the ptp clock so it does not roll over early */
	adj = comp * cycle_time;
	adj = adj / 1000000000ULL;
	adj = adj / CYCLE_MULT;
	comp -= adj;

	return comp;
}

struct ptp *ptp_get(void)
{
	struct ptp *ptp = first_ptp_block;

	/* Check that the PTP block is present in hardware */
	if (!pci_dev_present(ptp_id_table))
		return ERR_PTR(-ENODEV);
	/* Check that this driver is bound to the PTP block */
	if (!ptp)
		ptp = ERR_PTR(-EPROBE_DEFER);
	else if (!IS_ERR(ptp))
		pci_dev_get(ptp->pdev);

	return ptp;
}

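/* Drop the PCI device reference taken by ptp_get() */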
void ptp_put(struct ptp *ptp)
{
	if (!ptp)
		return;

	pci_dev_put(ptp->pdev);
}

static void ptp_atomic_update(struct ptp *ptp, u64 timestamp)
{
	u64 regval, curr_rollover_set, nxt_rollover_set;

	/* First setup NSECs and SECs */
	writeq(timestamp, ptp->reg_base + PTP_NANO_TIMESTAMP);
	writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
	writeq(timestamp / NSEC_PER_SEC,
	       ptp->reg_base + PTP_SEC_TIMESTAMP);

	nxt_rollover_set = roundup(timestamp, NSEC_PER_SEC);
	curr_rollover_set = nxt_rollover_set - NSEC_PER_SEC;
	writeq(nxt_rollover_set, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
	writeq(curr_rollover_set, ptp->reg_base + PTP_CURR_ROLLOVER_SET);

	/* Now, initiate atomic update */
	regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
	regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
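	/* the opcode is written at bit 26, the least significant bit of the
	 * ATOMIC_OP field (PTP_CLOCK_CFG bits 28:26)
	 */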
	regval |= (ATOMIC_SET << 26);
	writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
}

static void ptp_atomic_adjtime(struct ptp *ptp, s64 delta)
{
	bool neg_adj = false, atomic_inc_dec = false;
	u64 regval, ptp_clock_hi;

	if (delta < 0) {
		delta = -delta;
		neg_adj = true;
	}

	/* use atomic inc/dec when delta < 1 second */
	if (delta < NSEC_PER_SEC)
		atomic_inc_dec = true;

	if (!atomic_inc_dec) {
		ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
		if (neg_adj) {
			if (ptp_clock_hi > delta)
				ptp_clock_hi -= delta;
			else
				ptp_clock_hi = delta - ptp_clock_hi;
		} else {
			ptp_clock_hi += delta;
		}
		ptp_atomic_update(ptp, ptp_clock_hi);
	} else {
		writeq(delta, ptp->reg_base + PTP_NANO_TIMESTAMP);
		writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);

		/* initiate atomic inc/dec */
		regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
		regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
		regval |= neg_adj ? (ATOMIC_DEC << 26) : (ATOMIC_INC << 26);
		writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
	}
}

static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
	bool neg_adj = false;
	u32 freq, freq_adj;
	u64 comp, adj;
	s64 ppb;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	/* The hardware adds the clock compensation value to the PTP clock
	 * on every coprocessor clock cycle. By convention the compensation
	 * value represents the number of nanoseconds per cycle, stored in
	 * a 64-bit fixed-point representation: the upper 32 bits hold whole
	 * nanoseconds and the lower 32 bits hold the fractional part.
	 * scaled_ppm is the ratio, in "parts per million", by which the
	 * compensation value should be corrected.
	 * The new compensation value is computed with 64-bit fixed-point
	 * arithmetic using the formula
	 * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
	 * where tbase is the base compensation value calculated initially
	 * in the probe function.
	 */
	/* convert scaled_ppm to ppb */
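	/* scaled_ppm carries a 16-bit binary fraction, so
	 * ppb = scaled_ppm * 1000 / 2^16 = (scaled_ppm * 125) >> 13
	 */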
	ppb = 1 + scaled_ppm;
	ppb *= 125;
	ppb >>= 13;

	if (cn10k_ptp_errata(ptp)) {
		/* calculate the new frequency based on ppb */
		freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL;
		freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj;
		comp = ptp_calc_adjusted_comp(freq);
	} else {
		comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
		adj = comp * ppb;
		adj = div_u64(adj, 1000000000ull);
		comp = neg_adj ? comp - adj : comp + adj;
	}
	writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);

	return 0;
}

static int ptp_get_clock(struct ptp *ptp, u64 *clk)
{
	/* Return the current PTP clock */
	*clk = ptp->read_ptp_tstmp(ptp);

	return 0;
}

void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts)
{
	struct ptp *ptp = rvu->ptp;
	struct pci_dev *pdev;
	u64 clock_comp;
	u64 clock_cfg;

	if (!ptp)
		return;

	pdev = ptp->pdev;

	if (!sclk) {
		dev_err(&pdev->dev, "PTP input clock cannot be zero\n");
		return;
	}

	/* sclk is in MHz */
	ptp->clock_rate = sclk * 1000000;

	/* Program the seconds rollover value to 1 second */
	if (is_tstmp_atomic_update_supported(rvu)) {
		writeq(0, ptp->reg_base + PTP_NANO_TIMESTAMP);
		writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
		writeq(0, ptp->reg_base + PTP_SEC_TIMESTAMP);
		writeq(0, ptp->reg_base + PTP_CURR_ROLLOVER_SET);
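		/* 0x3b9aca00 == 1000000000, i.e. NSEC_PER_SEC */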
		writeq(0x3b9aca00, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
		writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
	}

	/* Enable PTP clock */
	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);

	if (ext_clk_freq) {
		ptp->clock_rate = ext_clk_freq;
		/* Set GPIO as PTP clock source */
		clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK;
		clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN;
	}

	if (extts) {
		clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE;
		/* Set GPIO as timestamping source */
		clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK;
		clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN;
	}

	clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
	clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
	clock_cfg |= (ATOMIC_SET << 26);
	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);

	if (cn10k_ptp_errata(ptp))
		clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
	else
		clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;

	/* Initial compensation value to start the nanoseconds counter */
	writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
}

static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
{
	u64 timestamp;

	if (is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) {
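		/* On CN10KA/CNF10KA the timestamp is read as seconds in the
		 * upper 32 bits and nanoseconds in the lower 32 bits, then
		 * folded into a single nanosecond count.
		 */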
		timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
		*clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
	} else {
		*clk = readq(ptp->reg_base + PTP_TIMESTAMP);
	}

	return 0;
}

static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
{
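	/* On parts affected by the errata the PPS threshold is maintained by
	 * the hrtimer handler instead, so the direct write is skipped.
	 */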
	if (!cn10k_ptp_errata(ptp))
		writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);

	return 0;
}

static int ptp_config_hrtimer(struct ptp *ptp, int on)
{
	u64 ptp_clock_hi;

	if (on) {
		ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
		ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
	} else {
		if (hrtimer_active(&ptp->hrtimer))
			hrtimer_cancel(&ptp->hrtimer);
	}

	return 0;
}

static int ptp_pps_on(struct ptp *ptp, int on, u64 period)
{
	u64 clock_cfg;

	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
	if (on) {
		if (cn10k_ptp_errata(ptp) && period != NSEC_PER_SEC) {
			dev_err(&ptp->pdev->dev, "Only a 1 second period is supported\n");
			return -EINVAL;
		}

		if (period > (8 * NSEC_PER_SEC)) {
			dev_err(&ptp->pdev->dev, "Maximum supported period is 8 seconds\n");
			return -EINVAL;
		}

		clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
		writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);

		writeq(0, ptp->reg_base + PTP_PPS_THRESH_HI);
		writeq(0, ptp->reg_base + PTP_PPS_THRESH_LO);

		/* Configure high/low phase time */
		period = period / 2;
		writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_HI_INCR);
		writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_LO_INCR);
	} else {
		clock_cfg &= ~(PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV);
		writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
	}

	if (on && cn10k_ptp_errata(ptp)) {
		/* The ptp_clock_hi counter rolls over to zero one clock cycle
		 * before it reaches the one-second boundary, so program
		 * pps_lo_incr such that the PPS threshold comparison at the
		 * one-second boundary succeeds and the PPS edge toggles.
		 * After each one-second boundary, the hrtimer handler is
		 * invoked and reprograms the PPS threshold value.
		 */
		ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
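		/* 0x1dcd6500 == 500000000 ns, i.e. half of NSEC_PER_SEC */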
		writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
		       ptp->reg_base + PTP_PPS_LO_INCR);
	}

	if (cn10k_ptp_errata(ptp))
		ptp_config_hrtimer(ptp, on);

	return 0;
}

static int ptp_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct ptp *ptp;
	int err;

	ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
	if (!ptp) {
		err = -ENOMEM;
		goto error;
	}

	ptp->pdev = pdev;

	err = pcim_enable_device(pdev);
	if (err)
		goto error_free;

	err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
	if (err)
		goto error_free;

	ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];

	pci_set_drvdata(pdev, ptp);
	if (!first_ptp_block)
		first_ptp_block = ptp;

	spin_lock_init(&ptp->ptp_lock);
	if (cn10k_ptp_errata(ptp)) {
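		/* Parts affected by the errata need the combined
		 * seconds+nanoseconds read and an hrtimer that periodically
		 * reprograms the PPS threshold.
		 */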
		ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
		hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		ptp->hrtimer.function = ptp_reset_thresh;
	} else {
		ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
	}

	return 0;

error_free:
	kfree(ptp);

error:
	/* For `ptp_get()` we need to differentiate between the case
	 * when the core has not tried to probe this device and the case
	 * when the probe failed.  In the latter case we keep the error in
	 * `dev->driver_data`.
	 */
	pci_set_drvdata(pdev, ERR_PTR(err));
	if (!first_ptp_block)
		first_ptp_block = ERR_PTR(err);

	return err;
}

static void ptp_remove(struct pci_dev *pdev)
{
	struct ptp *ptp = pci_get_drvdata(pdev);
	u64 clock_cfg;

	if (IS_ERR_OR_NULL(ptp))
		return;

	if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
		hrtimer_cancel(&ptp->hrtimer);

	/* Disable PTP clock */
	clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
	kfree(ptp);
}

static const struct pci_device_id ptp_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) },
	{ 0, }
};

struct pci_driver ptp_driver = {
	.name = DRV_NAME,
	.id_table = ptp_id_table,
	.probe = ptp_probe,
	.remove = ptp_remove,
};

int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
			    struct ptp_rsp *rsp)
{
	int err = 0;

	/* This mailbox handler is invoked by AF consumers/netdev drivers
	 * via the mailbox mechanism. Netdev drivers use it to read the
	 * PTP clock and to apply frequency adjustments. Since the mailbox
	 * can be called without knowing whether this driver is bound to
	 * the PTP device, the check below is needed as a first step.
	 */
	if (!rvu->ptp)
		return -ENODEV;

	switch (req->op) {
	case PTP_OP_ADJFINE:
		err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
		break;
	case PTP_OP_GET_CLOCK:
		err = ptp_get_clock(rvu->ptp, &rsp->clk);
		break;
	case PTP_OP_GET_TSTMP:
		err = ptp_get_tstmp(rvu->ptp, &rsp->clk);
		break;
	case PTP_OP_SET_THRESH:
		err = ptp_set_thresh(rvu->ptp, req->thresh);
		break;
	case PTP_OP_PPS_ON:
		err = ptp_pps_on(rvu->ptp, req->pps_on, req->period);
		break;
	case PTP_OP_ADJTIME:
		ptp_atomic_adjtime(rvu->ptp, req->delta);
		break;
	case PTP_OP_SET_CLOCK:
		ptp_atomic_update(rvu->ptp, (u64)req->clk);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

int rvu_mbox_handler_ptp_get_cap(struct rvu *rvu, struct msg_req *req,
				 struct ptp_get_cap_rsp *rsp)
{
	if (!rvu->ptp)
		return -ENODEV;

	if (is_tstmp_atomic_update_supported(rvu))
		rsp->cap |= PTP_CAP_HW_ATOMIC_UPDATE;
	else
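		/* bit 0 is assumed here to carry the atomic-update capability */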
		rsp->cap &= ~BIT_ULL_MASK(0);

	return 0;
}