// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Express Precision Time Measurement
 * Copyright (c) 2016, Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include "../pci.h"

/*
 * If the next upstream device supports PTM, return it; otherwise return
 * NULL.  PTM Messages are local, so both link partners must support it.
 */
static struct pci_dev *pci_upstream_ptm(struct pci_dev *dev)
{
	struct pci_dev *ups = pci_upstream_bridge(dev);

	/*
	 * Switch Downstream Ports are not permitted to have a PTM
	 * capability; their PTM behavior is controlled by the Upstream
	 * Port (PCIe r5.0, sec 7.9.16), so if the upstream bridge is a
	 * Switch Downstream Port, look up one more level.
	 */
	if (ups && pci_pcie_type(ups) == PCI_EXP_TYPE_DOWNSTREAM)
		ups = pci_upstream_bridge(ups);

	if (ups && ups->ptm_cap)
		return ups;

	return NULL;
}

/*
 * Find the PTM Capability (if present) and extract the information we need
 * to use it.
 */
void pci_ptm_init(struct pci_dev *dev)
{
	u16 ptm;
	u32 cap;
	struct pci_dev *ups;

	if (!pci_is_pcie(dev))
		return;

	ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
	if (!ptm)
		return;

	dev->ptm_cap = ptm;
	pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32));

	pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
	dev->ptm_granularity = FIELD_GET(PCI_PTM_GRANULARITY_MASK, cap);

	/*
	 * Per the spec recommendation (PCIe r6.0, sec 7.9.15.3), select the
	 * furthest upstream Time Source as the PTM Root.  For Endpoints,
	 * "the Effective Granularity is the maximum Local Clock Granularity
	 * reported by the PTM Root and all intervening PTM Time Sources."
	 */
	ups = pci_upstream_ptm(dev);
	if (ups) {
		if (ups->ptm_granularity == 0)
			dev->ptm_granularity = 0;
		else if (ups->ptm_granularity > dev->ptm_granularity)
			dev->ptm_granularity = ups->ptm_granularity;
	} else if (cap & PCI_PTM_CAP_ROOT) {
		dev->ptm_root = 1;
	} else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {

		/*
		 * Per sec 7.9.15.3, this should be the Local Clock
		 * Granularity of the associated Time Source.  But it
		 * doesn't say how to find that Time Source.
		 */
		dev->ptm_granularity = 0;
	}

	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM)
		pci_enable_ptm(dev, NULL);
}

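/*
 * Save the PTM Control register into the buffer reserved by
 * pci_ptm_init() so pci_restore_ptm_state() can reprogram it after the
 * device loses config space state (e.g. across a D3cold transition).
 */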
void pci_save_ptm_state(struct pci_dev *dev)
{
	u16 ptm = dev->ptm_cap;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!ptm)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
	if (!save_state)
		return;

	cap = (u32 *)&save_state->cap.data[0];
	pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, cap);
}

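/* Write back the PTM Control register saved by pci_save_ptm_state() */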
void pci_restore_ptm_state(struct pci_dev *dev)
{
	u16 ptm = dev->ptm_cap;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!ptm)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
	if (!save_state)
		return;

	cap = (u32 *)&save_state->cap.data[0];
	pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, *cap);
}

/* Enable PTM in the Control register if possible */
static int __pci_enable_ptm(struct pci_dev *dev)
{
	u16 ptm = dev->ptm_cap;
	struct pci_dev *ups;
	u32 ctrl;

	if (!ptm)
		return -EINVAL;

	/*
	 * A device uses local PTM Messages to request time information
	 * from a PTM Root that's farther upstream.  Every device along the
	 * path must support PTM and have it enabled so it can handle the
	 * messages.  Therefore, if this device is not a PTM Root, the
	 * upstream link partner must have PTM enabled before we can enable
	 * PTM.
	 */
	if (!dev->ptm_root) {
		ups = pci_upstream_ptm(dev);
		if (!ups || !ups->ptm_enabled)
			return -EINVAL;
	}

	pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);

	ctrl |= PCI_PTM_CTRL_ENABLE;
	ctrl &= ~PCI_PTM_GRANULARITY_MASK;
	ctrl |= FIELD_PREP(PCI_PTM_GRANULARITY_MASK, dev->ptm_granularity);
	if (dev->ptm_root)
		ctrl |= PCI_PTM_CTRL_ROOT;

	pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
	return 0;
}

/**
 * pci_enable_ptm() - Enable Precision Time Measurement
 * @dev: PCI device
 * @granularity: pointer to return granularity
 *
 * Enable Precision Time Measurement for @dev.  If successful and
 * @granularity is non-NULL, return the Effective Granularity.
 *
 * Return: zero if successful, or -EINVAL if @dev lacks a PTM Capability or
 * is not a PTM Root and lacks an upstream path of PTM-enabled devices.
 */
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{
	int rc;
	char clock_desc[8];

	rc = __pci_enable_ptm(dev);
	if (rc)
		return rc;

	dev->ptm_enabled = 1;

	if (granularity)
		*granularity = dev->ptm_granularity;

	switch (dev->ptm_granularity) {
	case 0:
		snprintf(clock_desc, sizeof(clock_desc), "unknown");
		break;
	case 255:
		snprintf(clock_desc, sizeof(clock_desc), ">254ns");
		break;
	default:
		snprintf(clock_desc, sizeof(clock_desc), "%uns",
			 dev->ptm_granularity);
		break;
	}
	pci_info(dev, "PTM enabled%s, %s granularity\n",
		 dev->ptm_root ? " (root)" : "", clock_desc);

	return 0;
}
EXPORT_SYMBOL(pci_enable_ptm);
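
/*
 * A minimal usage sketch (hypothetical caller; "pdev" is assumed to be
 * the driver's struct pci_dev).  A driver that can tolerate PTM being
 * unavailable might do:
 *
 *	u8 gran;
 *
 *	if (!pci_enable_ptm(pdev, &gran))
 *		pci_dbg(pdev, "PTM granularity: %uns\n", gran);
 */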

static void __pci_disable_ptm(struct pci_dev *dev)
{
	u16 ptm = dev->ptm_cap;
	u32 ctrl;

	if (!ptm)
		return;

	pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
	ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
	pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
}

/**
 * pci_disable_ptm() - Disable Precision Time Measurement
 * @dev: PCI device
 *
 * Disable Precision Time Measurement for @dev.
 */
void pci_disable_ptm(struct pci_dev *dev)
{
	if (dev->ptm_enabled) {
		__pci_disable_ptm(dev);
		dev->ptm_enabled = 0;
	}
}
EXPORT_SYMBOL(pci_disable_ptm);

/*
 * Disable PTM, but preserve dev->ptm_enabled so we silently re-enable
 * it on resume if necessary.
 */
void pci_suspend_ptm(struct pci_dev *dev)
{
	if (dev->ptm_enabled)
		__pci_disable_ptm(dev);
}

/* If PTM was enabled before suspend, re-enable it when resuming */
void pci_resume_ptm(struct pci_dev *dev)
{
	if (dev->ptm_enabled)
		__pci_enable_ptm(dev);
}

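/**
 * pcie_ptm_enabled() - Report whether PTM is enabled for a device
 * @dev: PCI device to check; may be NULL
 *
 * Return: true if PTM is enabled for @dev, false if it is disabled or
 * @dev is NULL.
 */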
bool pcie_ptm_enabled(struct pci_dev *dev)
{
	if (!dev)
		return false;

	return dev->ptm_enabled;
}
EXPORT_SYMBOL(pcie_ptm_enabled);

#if IS_ENABLED(CONFIG_DEBUG_FS)
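/*
 * The attributes below expose the PTM context (local/master clocks and
 * the t1..t4 dialogue timestamps) of PTM-capable controllers through
 * debugfs.  Each attribute is backed by a pcie_ptm_ops callback and is
 * created only when the corresponding *_visible() callback reports it
 * as supported.
 */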
static ssize_t context_update_write(struct file *file, const char __user *ubuf,
				    size_t count, loff_t *ppos)
{
	struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
	char buf[8];	/* room for "manual\n" plus the terminating NUL */
	int ret;
	u8 mode;

	if (!ptm_debugfs->ops->context_update_write)
		return -EOPNOTSUPP;

	if (count < 1 || count >= sizeof(buf))
		return -EINVAL;

	ret = copy_from_user(buf, ubuf, count);
	if (ret)
		return -EFAULT;

	buf[count] = '\0';

	if (sysfs_streq(buf, "auto"))
		mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
	else if (sysfs_streq(buf, "manual"))
		mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
	else
		return -EINVAL;

	mutex_lock(&ptm_debugfs->lock);
	ret = ptm_debugfs->ops->context_update_write(ptm_debugfs->pdata, mode);
	mutex_unlock(&ptm_debugfs->lock);
	if (ret)
		return ret;

	return count;
}

static ssize_t context_update_read(struct file *file, char __user *ubuf,
				   size_t count, loff_t *ppos)
{
	struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
	char buf[8]; /* room for "manual\n" plus the terminating NUL */
	ssize_t pos;
	int ret;
	u8 mode;

	if (!ptm_debugfs->ops->context_update_read)
		return -EOPNOTSUPP;

	mutex_lock(&ptm_debugfs->lock);
	ret = ptm_debugfs->ops->context_update_read(ptm_debugfs->pdata, &mode);
	mutex_unlock(&ptm_debugfs->lock);
	if (ret)
		return ret;

	if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO)
		pos = scnprintf(buf, sizeof(buf), "auto\n");
	else
		pos = scnprintf(buf, sizeof(buf), "manual\n");

	return simple_read_from_buffer(ubuf, count, ppos, buf, pos);
}

static const struct file_operations context_update_fops = {
	.open = simple_open,
	.read = context_update_read,
	.write = context_update_write,
};

static int context_valid_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	bool valid;
	int ret;

	if (!ptm_debugfs->ops->context_valid_read)
		return -EOPNOTSUPP;

	mutex_lock(&ptm_debugfs->lock);
	ret = ptm_debugfs->ops->context_valid_read(ptm_debugfs->pdata, &valid);
	mutex_unlock(&ptm_debugfs->lock);
	if (ret)
		return ret;

	*val = valid;

	return 0;
}

static int context_valid_set(void *data, u64 val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	int ret;

	if (!ptm_debugfs->ops->context_valid_write)
		return -EOPNOTSUPP;

	mutex_lock(&ptm_debugfs->lock);
	ret = ptm_debugfs->ops->context_valid_write(ptm_debugfs->pdata, !!val);
	mutex_unlock(&ptm_debugfs->lock);

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(context_valid_fops, context_valid_get,
			 context_valid_set, "%llu\n");

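/*
 * Snapshots of the component's local PTM clock and of the last PTM
 * master time it received.  The values are raw 64-bit counts; the unit
 * (typically nanoseconds) depends on the controller.
 */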
static int local_clock_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->local_clock_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->local_clock_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(local_clock_fops, local_clock_get, NULL, "%llu\n");

static int master_clock_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->master_clock_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->master_clock_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(master_clock_fops, master_clock_get, NULL, "%llu\n");

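/*
 * t1..t4 are the four timestamps of a PTM dialogue: t1/t4 are captured
 * by the downstream Requester (request sent, response received) and
 * t2/t3 by the upstream Responder (request received, response sent),
 * analogous to a PTP delay-request exchange.
 */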
static int t1_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->t1_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->t1_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t1_fops, t1_get, NULL, "%llu\n");

static int t2_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->t2_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->t2_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t2_fops, t2_get, NULL, "%llu\n");

static int t3_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->t3_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->t3_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t3_fops, t3_get, NULL, "%llu\n");

static int t4_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->t4_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->t4_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t4_fops, t4_get, NULL, "%llu\n");

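/*
 * Create the debugfs file for @attr only if the component provides the
 * matching <attr>_visible() callback and it reports the attribute as
 * supported.
 */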
#define pcie_ptm_create_debugfs_file(pdata, mode, attr)			\
	do {								\
		if (ops->attr##_visible && ops->attr##_visible(pdata))	\
			debugfs_create_file(#attr, mode, ptm_debugfs->debugfs, \
					    ptm_debugfs, &attr##_fops);	\
	} while (0)

/**
 * pcie_ptm_create_debugfs() - Create debugfs entries for the PTM context
 * @dev: PTM capable component device
 * @pdata: Private data of the PTM capable component device
 * @ops: PTM callback structure
 *
 * Create debugfs entries for exposing the PTM context of PTM capable
 * components such as Root Complex and Endpoint controllers.
 *
 * Return: Pointer to 'struct pci_ptm_debugfs' if successful, NULL otherwise.
 */
struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
						const struct pcie_ptm_ops *ops)
{
	struct pci_ptm_debugfs *ptm_debugfs;
	char *dirname;
	int ret;

	/* Caller must provide the check_capability() callback */
	if (!ops->check_capability)
		return NULL;

	/* Check for PTM capability before creating debugfs attributes */
	ret = ops->check_capability(pdata);
	if (!ret) {
		dev_dbg(dev, "PTM capability not present\n");
		return NULL;
	}

	ptm_debugfs = kzalloc(sizeof(*ptm_debugfs), GFP_KERNEL);
	if (!ptm_debugfs)
		return NULL;

	dirname = devm_kasprintf(dev, GFP_KERNEL, "pcie_ptm_%s", dev_name(dev));
	if (!dirname) {
		kfree(ptm_debugfs);
		return NULL;
	}

	ptm_debugfs->debugfs = debugfs_create_dir(dirname, NULL);
	ptm_debugfs->pdata = pdata;
	ptm_debugfs->ops = ops;
	mutex_init(&ptm_debugfs->lock);

	pcie_ptm_create_debugfs_file(pdata, 0644, context_update);
	pcie_ptm_create_debugfs_file(pdata, 0644, context_valid);
	pcie_ptm_create_debugfs_file(pdata, 0444, local_clock);
	pcie_ptm_create_debugfs_file(pdata, 0444, master_clock);
	pcie_ptm_create_debugfs_file(pdata, 0444, t1);
	pcie_ptm_create_debugfs_file(pdata, 0444, t2);
	pcie_ptm_create_debugfs_file(pdata, 0444, t3);
	pcie_ptm_create_debugfs_file(pdata, 0444, t4);

	return ptm_debugfs;
}
EXPORT_SYMBOL_GPL(pcie_ptm_create_debugfs);
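
/*
 * A minimal usage sketch (hypothetical controller driver; the foo_*
 * names and fields are assumptions, not part of this API):
 *
 *	static const struct pcie_ptm_ops foo_ptm_ops = {
 *		.check_capability = foo_ptm_check_capability,
 *		.local_clock_read = foo_ptm_local_clock_read,
 *	};
 *
 *	foo->ptm_debugfs = pcie_ptm_create_debugfs(foo->dev, foo,
 *						   &foo_ptm_ops);
 *
 * and on teardown:
 *
 *	pcie_ptm_destroy_debugfs(foo->ptm_debugfs);
 */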

/**
 * pcie_ptm_destroy_debugfs() - Destroy debugfs entries for the PTM context
 * @ptm_debugfs: Pointer to the PTM debugfs struct
 */
void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs)
{
	if (!ptm_debugfs)
		return;

	mutex_destroy(&ptm_debugfs->lock);
	debugfs_remove_recursive(ptm_debugfs->debugfs);
	kfree(ptm_debugfs);	/* allocated in pcie_ptm_create_debugfs() */
}
EXPORT_SYMBOL_GPL(pcie_ptm_destroy_debugfs);
#endif