// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP virtual clock driver
 *
 * Copyright 2021 NXP
 */
#include <linux/slab.h>
#include <linux/hashtable.h>
#include "ptp_private.h"

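/*
 * Fixed-point parameters of the underlying cyclecounter/timecounter.
 * With mult = 2^31 and shift = 31 the nominal conversion from parent
 * cycles (already nanosecond counts) to vclock time is 1:1.  adjfine()
 * scales mult by scaled_ppm, which is ppm with a 16-bit fractional part:
 * delta_mult = scaled_ppm * 2^9 / 15625 == mult * scaled_ppm / (10^6 * 2^16).
 */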
#define PTP_VCLOCK_CC_SHIFT		31
#define PTP_VCLOCK_CC_MULT		(1 << PTP_VCLOCK_CC_SHIFT)
#define PTP_VCLOCK_FADJ_SHIFT		9
#define PTP_VCLOCK_FADJ_DENOMINATOR	15625ULL
#define PTP_VCLOCK_REFRESH_INTERVAL	(HZ * 2)

/* protects vclock_hash addition/deletion */
static DEFINE_SPINLOCK(vclock_hash_lock);

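/*
 * Registered vclocks, hashed by clock index so that ptp_convert_timestamp()
 * can look them up under RCU in the timestamping fast path.
 */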
static DEFINE_READ_MOSTLY_HASHTABLE(vclock_hash, 8);

static void ptp_vclock_hash_add(struct ptp_vclock *vclock)
{
	spin_lock(&vclock_hash_lock);

	hlist_add_head_rcu(&vclock->vclock_hash_node,
			   &vclock_hash[vclock->clock->index % HASH_SIZE(vclock_hash)]);

	spin_unlock(&vclock_hash_lock);
}

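/*
 * Remove the vclock from the hash and wait for all RCU readers to drop
 * their references before the caller frees it.
 */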
static void ptp_vclock_hash_del(struct ptp_vclock *vclock)
{
	spin_lock(&vclock_hash_lock);

	hlist_del_init_rcu(&vclock->vclock_hash_node);

	spin_unlock(&vclock_hash_lock);

	synchronize_rcu();
}

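/*
 * Frequency adjustment: fold the time elapsed at the old rate into the
 * timecounter first, then switch cc.mult so the new rate only applies
 * from this point on.
 */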
static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	unsigned long flags;
	s64 adj;

	adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT;
	adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR);

	spin_lock_irqsave(&vclock->lock, flags);
	timecounter_read(&vclock->tc);
	vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj;
	spin_unlock_irqrestore(&vclock->lock, flags);

	return 0;
}

static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	unsigned long flags;

	spin_lock_irqsave(&vclock->lock, flags);
	timecounter_adjtime(&vclock->tc, delta);
	spin_unlock_irqrestore(&vclock->lock, flags);

	return 0;
}

static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
			      struct timespec64 *ts)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&vclock->lock, flags);
	ns = timecounter_read(&vclock->tc);
	spin_unlock_irqrestore(&vclock->lock, flags);
	*ts = ns_to_timespec64(ns);

	return 0;
}

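/*
 * gettimex variant used when the parent clock can read its free-running
 * counter with pre/post system timestamps: fetch the parent cycles via
 * getcyclesx64() and convert them through the vclock's timecounter.
 */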
static int ptp_vclock_gettimex(struct ptp_clock_info *ptp,
			       struct timespec64 *ts,
			       struct ptp_system_timestamp *sts)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	struct ptp_clock *pptp = vclock->pclock;
	struct timespec64 pts;
	unsigned long flags;
	int err;
	u64 ns;

	err = pptp->info->getcyclesx64(pptp->info, &pts, sts);
	if (err)
		return err;

	spin_lock_irqsave(&vclock->lock, flags);
	ns = timecounter_cyc2time(&vclock->tc, timespec64_to_ns(&pts));
	spin_unlock_irqrestore(&vclock->lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int ptp_vclock_settime(struct ptp_clock_info *ptp,
			      const struct timespec64 *ts)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	spin_lock_irqsave(&vclock->lock, flags);
	timecounter_init(&vclock->tc, &vclock->cc, ns);
	spin_unlock_irqrestore(&vclock->lock, flags);

	return 0;
}

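/*
 * Cross-timestamping: let the parent clock sample device cycles against
 * the system clocks, then rewrite only the device leg into vclock time;
 * the system timestamps in *xtstamp are left untouched.
 */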
static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp,
				     struct system_device_crosststamp *xtstamp)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	struct ptp_clock *pptp = vclock->pclock;
	unsigned long flags;
	int err;
	u64 ns;

	err = pptp->info->getcrosscycles(pptp->info, xtstamp);
	if (err)
		return err;

	spin_lock_irqsave(&vclock->lock, flags);
	ns = timecounter_cyc2time(&vclock->tc, ktime_to_ns(xtstamp->device));
	spin_unlock_irqrestore(&vclock->lock, flags);

	xtstamp->device = ns_to_ktime(ns);

	return 0;
}

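/*
 * Periodic aux work: read the timecounter every two seconds so the delta
 * against the 32-bit cycle mask (about 4.3 s worth of nanoseconds) never
 * overflows.  The return value is the delay in jiffies until the next run.
 */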
static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	struct timespec64 ts;

	ptp_vclock_gettime(&vclock->info, &ts);

	return PTP_VCLOCK_REFRESH_INTERVAL;
}

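/*
 * Template clock_info for every vclock.  The gettime64/gettimex64 and
 * getcrosststamp callbacks are filled in at registration time, depending
 * on which cycle-reading ops the parent clock provides.
 */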
static const struct ptp_clock_info ptp_vclock_info = {
	.owner		= THIS_MODULE,
	.name		= "ptp virtual clock",
	.max_adj	= 500000000,
	.adjfine	= ptp_vclock_adjfine,
	.adjtime	= ptp_vclock_adjtime,
	.settime64	= ptp_vclock_settime,
	.do_aux_work	= ptp_vclock_refresh,
};

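/*
 * Cyclecounter read back-end: the "cycles" of a vclock are the parent
 * clock's free-running counter, read via getcycles64() and already
 * expressed in nanoseconds.
 */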
static u64 ptp_vclock_read(const struct cyclecounter *cc)
{
	struct ptp_vclock *vclock = cc_to_vclock(cc);
	struct ptp_clock *ptp = vclock->pclock;
	struct timespec64 ts = {};

	ptp->info->getcycles64(ptp->info, &ts);

	return timespec64_to_ns(&ts);
}

static const struct cyclecounter ptp_vclock_cc = {
	.read	= ptp_vclock_read,
	.mask	= CYCLECOUNTER_MASK(32),
	.mult	= PTP_VCLOCK_CC_MULT,
	.shift	= PTP_VCLOCK_CC_SHIFT,
};

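/*
 * Create a virtual clock on top of @pclock: copy the template info, wire
 * up the gettime/cross-timestamp ops the parent supports, register a new
 * PTP clock as a child of the parent's device, start the periodic refresh
 * worker and make the vclock visible in the hash.  Returns NULL on failure.
 */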
struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
{
	struct ptp_vclock *vclock;

	vclock = kzalloc(sizeof(*vclock), GFP_KERNEL);
	if (!vclock)
		return NULL;

	vclock->pclock = pclock;
	vclock->info = ptp_vclock_info;
	if (pclock->info->getcyclesx64)
		vclock->info.gettimex64 = ptp_vclock_gettimex;
	else
		vclock->info.gettime64 = ptp_vclock_gettime;
	if (pclock->info->getcrosscycles)
		vclock->info.getcrosststamp = ptp_vclock_getcrosststamp;
	vclock->cc = ptp_vclock_cc;

	snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt",
		 pclock->index);

	INIT_HLIST_NODE(&vclock->vclock_hash_node);

	spin_lock_init(&vclock->lock);

	vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev);
	if (IS_ERR_OR_NULL(vclock->clock)) {
		kfree(vclock);
		return NULL;
	}

	timecounter_init(&vclock->tc, &vclock->cc, 0);
	ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);

	ptp_vclock_hash_add(vclock);

	return vclock;
}

void ptp_vclock_unregister(struct ptp_vclock *vclock)
{
	ptp_vclock_hash_del(vclock);

	ptp_clock_unregister(vclock->clock);
	kfree(vclock);
}

#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK)
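/*
 * The helpers below are exported for callers outside the PTP core (e.g.
 * the networking stack) and are only built when PTP support is built in.
 *
 * ptp_get_vclocks_index() looks up the physical clock "ptp<pclock_index>"
 * and returns how many vclocks sit on top of it, handing back a kzalloc'ed
 * array of their clock indexes in *vclock_index (the caller must kfree()
 * it).  0 is returned on any failure.
 */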
int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
{
	char name[PTP_CLOCK_NAME_LEN] = "";
	struct ptp_clock *ptp;
	struct device *dev;
	int num = 0;

	if (pclock_index < 0)
		return num;

	snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index);
	dev = class_find_device_by_name(ptp_class, name);
	if (!dev)
		return num;

	ptp = dev_get_drvdata(dev);

	if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) {
		put_device(dev);
		return num;
	}

	*vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL);
	if (!(*vclock_index))
		goto out;

	memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks);
	num = ptp->n_vclocks;
out:
	mutex_unlock(&ptp->n_vclocks_mux);
	put_device(dev);
	return num;
}
EXPORT_SYMBOL(ptp_get_vclocks_index);

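/*
 * Convert a hardware timestamp taken in the physical clock's domain into
 * the domain of the vclock with the given index: look the vclock up in
 * the RCU-protected hash and run the raw nanosecond value through its
 * timecounter.  If no matching vclock is found, 0 is returned.
 */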
ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index)
{
	unsigned int hash = vclock_index % HASH_SIZE(vclock_hash);
	struct ptp_vclock *vclock;
	unsigned long flags;
	u64 ns;
	u64 vclock_ns = 0;

	ns = ktime_to_ns(*hwtstamp);

	rcu_read_lock();

	hlist_for_each_entry_rcu(vclock, &vclock_hash[hash], vclock_hash_node) {
		if (vclock->clock->index != vclock_index)
			continue;

		spin_lock_irqsave(&vclock->lock, flags);
		vclock_ns = timecounter_cyc2time(&vclock->tc, ns);
		spin_unlock_irqrestore(&vclock->lock, flags);
		break;
	}

	rcu_read_unlock();

	return ns_to_ktime(vclock_ns);
}
EXPORT_SYMBOL(ptp_convert_timestamp);
#endif