// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Express Precision Time Measurement
 * Copyright (c) 2016, Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include "../pci.h"

/*
 * If the next upstream device supports PTM, return it; otherwise return
 * NULL.  PTM Messages are local, so both link partners must support it.
 */
static struct pci_dev *pci_upstream_ptm(struct pci_dev *dev)
{
	struct pci_dev *ups = pci_upstream_bridge(dev);

	/*
	 * Switch Downstream Ports are not permitted to have a PTM
	 * capability; their PTM behavior is controlled by the Upstream
	 * Port (PCIe r5.0, sec 7.9.16), so if the upstream bridge is a
	 * Switch Downstream Port, look up one more level.
	 */
	if (ups && pci_pcie_type(ups) == PCI_EXP_TYPE_DOWNSTREAM)
		ups = pci_upstream_bridge(ups);

	if (ups && ups->ptm_cap)
		return ups;

	return NULL;
}

/*
 * Find the PTM Capability (if present) and extract the information we need
 * to use it.
 */
void pci_ptm_init(struct pci_dev *dev)
{
	u16 ptm;
	u32 cap;
	struct pci_dev *ups;

	if (!pci_is_pcie(dev))
		return;

	ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
	if (!ptm)
		return;

	dev->ptm_cap = ptm;
	pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32));

	pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
	dev->ptm_granularity = FIELD_GET(PCI_PTM_GRANULARITY_MASK, cap);

	/*
	 * Per the spec recommendation (PCIe r6.0, sec 7.9.15.3), select the
	 * furthest upstream Time Source as the PTM Root.  For Endpoints,
	 * "the Effective Granularity is the maximum Local Clock Granularity
	 * reported by the PTM Root and all intervening PTM Time Sources."
	 */
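	/*
	 * For example: if the PTM Root advertises a Local Clock
	 * Granularity of 4ns and an intervening Switch advertises 16ns,
	 * the Effective Granularity computed below for a downstream
	 * Endpoint is 16ns.  A value of 0 ("unknown") anywhere upstream
	 * propagates all the way down.
	 */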
	ups = pci_upstream_ptm(dev);
	if (ups) {
		if (ups->ptm_granularity == 0)
			dev->ptm_granularity = 0;
		else if (ups->ptm_granularity > dev->ptm_granularity)
			dev->ptm_granularity = ups->ptm_granularity;
	} else if (cap & PCI_PTM_CAP_ROOT) {
		dev->ptm_root = 1;
	} else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
		/*
		 * Per sec 7.9.15.3, this should be the Local Clock
		 * Granularity of the associated Time Source.  But it
		 * doesn't say how to find that Time Source.
		 */
		dev->ptm_granularity = 0;
	}

	if (cap & PCI_PTM_CAP_RES)
		dev->ptm_responder = 1;
	if (cap & PCI_PTM_CAP_REQ)
		dev->ptm_requester = 1;

	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM)
		pci_enable_ptm(dev, NULL);
}

void pci_save_ptm_state(struct pci_dev *dev)
{
	u16 ptm = dev->ptm_cap;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!ptm)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
	if (!save_state)
		return;

	cap = (u32 *)&save_state->cap.data[0];
	pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, cap);
}

void pci_restore_ptm_state(struct pci_dev *dev)
{
	u16 ptm = dev->ptm_cap;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!ptm)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
	if (!save_state)
		return;

	cap = (u32 *)&save_state->cap.data[0];
	pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, *cap);
}

/* Enable PTM in the Control register if possible */
static int __pci_enable_ptm(struct pci_dev *dev)
{
	u16 ptm = dev->ptm_cap;
	struct pci_dev *ups;
	u32 ctrl;

	if (!ptm)
		return -EINVAL;

	/*
	 * A device uses local PTM Messages to request time information
	 * from a PTM Root that's farther upstream.  Every device along the
	 * path must support PTM and have it enabled so it can handle the
	 * messages.  Therefore, if this device is not a PTM Root, the
	 * upstream link partner must have PTM enabled before we can enable
	 * PTM.
	 */
	if (!dev->ptm_root) {
		ups = pci_upstream_ptm(dev);
		if (!ups || !ups->ptm_enabled)
			return -EINVAL;
	}

	switch (pci_pcie_type(dev)) {
	case PCI_EXP_TYPE_ROOT_PORT:
		if (!dev->ptm_root)
			return -EINVAL;
		break;
	case PCI_EXP_TYPE_UPSTREAM:
		if (!dev->ptm_responder)
			return -EINVAL;
		break;
	case PCI_EXP_TYPE_ENDPOINT:
	case PCI_EXP_TYPE_LEG_END:
		if (!dev->ptm_requester)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);

	ctrl |= PCI_PTM_CTRL_ENABLE;
	ctrl &= ~PCI_PTM_GRANULARITY_MASK;
	ctrl |= FIELD_PREP(PCI_PTM_GRANULARITY_MASK, dev->ptm_granularity);
	if (dev->ptm_root)
		ctrl |= PCI_PTM_CTRL_ROOT;

	pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
	return 0;
}

/**
 * pci_enable_ptm() - Enable Precision Time Measurement
 * @dev: PCI device
 * @granularity: pointer to return granularity
 *
 * Enable Precision Time Measurement for @dev.  If successful and
 * @granularity is non-NULL, return the Effective Granularity.
 *
 * Return: zero if successful, or -EINVAL if @dev lacks a PTM Capability or
 * is not a PTM Root and lacks an upstream path of PTM-enabled devices.
 */
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{
	int rc;
	char clock_desc[8];

	rc = __pci_enable_ptm(dev);
	if (rc)
		return rc;

	dev->ptm_enabled = 1;

	if (granularity)
		*granularity = dev->ptm_granularity;

	switch (dev->ptm_granularity) {
	case 0:
		snprintf(clock_desc, sizeof(clock_desc), "unknown");
		break;
	case 255:
		snprintf(clock_desc, sizeof(clock_desc), ">254ns");
		break;
	default:
		snprintf(clock_desc, sizeof(clock_desc), "%uns",
			 dev->ptm_granularity);
		break;
	}
	pci_info(dev, "PTM enabled%s, %s granularity\n",
		 dev->ptm_root ? " (root)" : "", clock_desc);

	return 0;
}
EXPORT_SYMBOL(pci_enable_ptm);
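
/*
 * Minimal usage sketch: a driver that needs PTM timestamps typically
 * enables PTM from its probe routine and reads back the Effective
 * Granularity (0 means the granularity is unknown):
 *
 *	u8 granularity;
 *
 *	if (!pci_enable_ptm(pdev, &granularity))
 *		dev_info(&pdev->dev, "PTM granularity %uns\n", granularity);
 */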

static void __pci_disable_ptm(struct pci_dev *dev)
{
	u16 ptm = dev->ptm_cap;
	u32 ctrl;

	if (!ptm)
		return;

	pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
	ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
	pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
}

/**
 * pci_disable_ptm() - Disable Precision Time Measurement
 * @dev: PCI device
 *
 * Disable Precision Time Measurement for @dev.
 */
void pci_disable_ptm(struct pci_dev *dev)
{
	if (dev->ptm_enabled) {
		__pci_disable_ptm(dev);
		dev->ptm_enabled = 0;
	}
}
EXPORT_SYMBOL(pci_disable_ptm);

/*
 * Disable PTM, but preserve dev->ptm_enabled so we silently re-enable it on
 * resume if necessary.
 */
void pci_suspend_ptm(struct pci_dev *dev)
{
	if (dev->ptm_enabled)
		__pci_disable_ptm(dev);
}

/* If PTM was enabled before suspend, re-enable it when resuming */
void pci_resume_ptm(struct pci_dev *dev)
{
	if (dev->ptm_enabled)
		__pci_enable_ptm(dev);
}

bool pcie_ptm_enabled(struct pci_dev *dev)
{
	if (!dev)
		return false;

	return dev->ptm_enabled;
}
EXPORT_SYMBOL(pcie_ptm_enabled);
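
/*
 * Illustrative use (foo_enable_crosststamp() is a hypothetical driver
 * helper): rather than probing config space itself, a driver can gate
 * PTM-dependent features on this helper:
 *
 *	if (pcie_ptm_enabled(pdev))
 *		foo_enable_crosststamp(adapter);
 */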

#if IS_ENABLED(CONFIG_DEBUG_FS)
static ssize_t context_update_write(struct file *file, const char __user *ubuf,
				    size_t count, loff_t *ppos)
{
	struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
	char buf[7];
	int ret;
	u8 mode;

	if (!ptm_debugfs->ops->context_update_write)
		return -EOPNOTSUPP;

	if (count < 1 || count >= sizeof(buf))
		return -EINVAL;

	ret = copy_from_user(buf, ubuf, count);
	if (ret)
		return -EFAULT;

	buf[count] = '\0';

	if (sysfs_streq(buf, "auto"))
		mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
	else if (sysfs_streq(buf, "manual"))
		mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
	else
		return -EINVAL;

	mutex_lock(&ptm_debugfs->lock);
	ret = ptm_debugfs->ops->context_update_write(ptm_debugfs->pdata, mode);
	mutex_unlock(&ptm_debugfs->lock);
	if (ret)
		return ret;

	return count;
}

static ssize_t context_update_read(struct file *file, char __user *ubuf,
				   size_t count, loff_t *ppos)
{
	struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
	char buf[8]; /* Extra space for '\0' termination at the end */
	ssize_t pos;
	int ret;
	u8 mode;

	if (!ptm_debugfs->ops->context_update_read)
		return -EOPNOTSUPP;

	mutex_lock(&ptm_debugfs->lock);
	ret = ptm_debugfs->ops->context_update_read(ptm_debugfs->pdata, &mode);
	mutex_unlock(&ptm_debugfs->lock);
	/* Propagate errors so userspace never sees an uninitialized mode */
	if (ret)
		return ret;

	if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO)
		pos = scnprintf(buf, sizeof(buf), "auto\n");
	else
		pos = scnprintf(buf, sizeof(buf), "manual\n");

	return simple_read_from_buffer(ubuf, count, ppos, buf, pos);
}

static const struct file_operations context_update_fops = {
	.open = simple_open,
	.read = context_update_read,
	.write = context_update_write,
};
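
/*
 * With debugfs mounted in the usual place, the update mode can be
 * inspected and changed from userspace, e.g.:
 *
 *	# cat /sys/kernel/debug/pcie_ptm_<dev>/context_update
 *	auto
 *	# echo manual > /sys/kernel/debug/pcie_ptm_<dev>/context_update
 */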

static int context_valid_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	bool valid;
	int ret;

	if (!ptm_debugfs->ops->context_valid_read)
		return -EOPNOTSUPP;

	mutex_lock(&ptm_debugfs->lock);
	ret = ptm_debugfs->ops->context_valid_read(ptm_debugfs->pdata, &valid);
	mutex_unlock(&ptm_debugfs->lock);
	if (ret)
		return ret;

	*val = valid;

	return 0;
}

static int context_valid_set(void *data, u64 val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	int ret;

	if (!ptm_debugfs->ops->context_valid_write)
		return -EOPNOTSUPP;

	mutex_lock(&ptm_debugfs->lock);
	ret = ptm_debugfs->ops->context_valid_write(ptm_debugfs->pdata, !!val);
	mutex_unlock(&ptm_debugfs->lock);

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(context_valid_fops, context_valid_get,
			 context_valid_set, "%llu\n");

static int local_clock_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->local_clock_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->local_clock_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(local_clock_fops, local_clock_get, NULL, "%llu\n");

static int master_clock_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->master_clock_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->master_clock_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(master_clock_fops, master_clock_get, NULL, "%llu\n");

static int t1_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->t1_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->t1_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t1_fops, t1_get, NULL, "%llu\n");

static int t2_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->t2_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->t2_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t2_fops, t2_get, NULL, "%llu\n");

static int t3_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->t3_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->t3_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t3_fops, t3_get, NULL, "%llu\n");

static int t4_get(void *data, u64 *val)
{
	struct pci_ptm_debugfs *ptm_debugfs = data;
	u64 clock;
	int ret;

	if (!ptm_debugfs->ops->t4_read)
		return -EOPNOTSUPP;

	ret = ptm_debugfs->ops->t4_read(ptm_debugfs->pdata, &clock);
	if (ret)
		return ret;

	*val = clock;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(t4_fops, t4_get, NULL, "%llu\n");

#define pcie_ptm_create_debugfs_file(pdata, mode, attr)			\
	do {								\
		if (ops->attr##_visible && ops->attr##_visible(pdata))	\
			debugfs_create_file(#attr, mode, ptm_debugfs->debugfs, \
					    ptm_debugfs, &attr##_fops);	\
	} while (0)
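
/*
 * For example, pcie_ptm_create_debugfs_file(pdata, 0444, local_clock)
 * expands to a visibility check via ops->local_clock_visible(pdata)
 * followed by debugfs_create_file("local_clock", 0444, ...,
 * &local_clock_fops).
 */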

/**
 * pcie_ptm_create_debugfs() - Create debugfs entries for the PTM context
 * @dev: PTM capable component device
 * @pdata: Private data of the PTM capable component device
 * @ops: PTM callback structure
 *
 * Create debugfs entries for exposing the PTM context of PTM capable
 * components such as Root Complex and Endpoint controllers.
 *
 * Return: Pointer to 'struct pci_ptm_debugfs' on success, NULL otherwise.
 */
struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
						const struct pcie_ptm_ops *ops)
{
	struct pci_ptm_debugfs *ptm_debugfs;
	char *dirname;
	int ret;

	/* Caller must provide the check_capability() callback */
	if (!ops->check_capability)
		return NULL;

	/* Check for PTM capability before creating debugfs attributes */
	ret = ops->check_capability(pdata);
	if (!ret) {
		dev_dbg(dev, "PTM capability not present\n");
		return NULL;
	}

	ptm_debugfs = kzalloc(sizeof(*ptm_debugfs), GFP_KERNEL);
	if (!ptm_debugfs)
		return NULL;

	dirname = devm_kasprintf(dev, GFP_KERNEL, "pcie_ptm_%s", dev_name(dev));
	if (!dirname) {
		kfree(ptm_debugfs);
		return NULL;
	}

	ptm_debugfs->debugfs = debugfs_create_dir(dirname, NULL);
	ptm_debugfs->pdata = pdata;
	ptm_debugfs->ops = ops;
	mutex_init(&ptm_debugfs->lock);

	pcie_ptm_create_debugfs_file(pdata, 0644, context_update);
	pcie_ptm_create_debugfs_file(pdata, 0644, context_valid);
	pcie_ptm_create_debugfs_file(pdata, 0444, local_clock);
	pcie_ptm_create_debugfs_file(pdata, 0444, master_clock);
	pcie_ptm_create_debugfs_file(pdata, 0444, t1);
	pcie_ptm_create_debugfs_file(pdata, 0444, t2);
	pcie_ptm_create_debugfs_file(pdata, 0444, t3);
	pcie_ptm_create_debugfs_file(pdata, 0444, t4);

	return ptm_debugfs;
}
EXPORT_SYMBOL_GPL(pcie_ptm_create_debugfs);
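
/*
 * Usage sketch (the "foo" names are hypothetical): a controller driver
 * fills in a struct pcie_ptm_ops with check_capability() plus whichever
 * accessors and *_visible() hooks it supports, then registers it:
 *
 *	static const struct pcie_ptm_ops foo_ptm_ops = {
 *		.check_capability = foo_ptm_check_capability,
 *		.local_clock_read = foo_ptm_local_clock_read,
 *		.local_clock_visible = foo_ptm_local_clock_visible,
 *	};
 *
 *	foo->ptm_debugfs = pcie_ptm_create_debugfs(foo->dev, foo,
 *						    &foo_ptm_ops);
 *
 * and undoes it with pcie_ptm_destroy_debugfs(foo->ptm_debugfs).
 */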

/**
 * pcie_ptm_destroy_debugfs() - Destroy debugfs entries for the PTM context
 * @ptm_debugfs: Pointer to the PTM debugfs struct
 */
void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs)
{
	if (!ptm_debugfs)
		return;

	mutex_destroy(&ptm_debugfs->lock);
	debugfs_remove_recursive(ptm_debugfs->debugfs);
	kfree(ptm_debugfs);	/* Allocated in pcie_ptm_create_debugfs() */
}
EXPORT_SYMBOL_GPL(pcie_ptm_destroy_debugfs);
#endif