// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
#include <linux/dma-map-ops.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
#include <linux/xarray.h>

struct pci_p2pdma {
	struct gen_pool *pool;
	bool p2pmem_published;
	struct xarray map_types;
	struct p2pdma_provider mem[PCI_STD_NUM_BARS];
};

struct pci_p2pdma_pagemap {
	struct dev_pagemap pgmap;
	struct p2pdma_provider *mem;
};

static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
{
	return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	size_t size = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		size = gen_pool_size(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	size_t avail = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		avail = gen_pool_avail(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	bool published = false;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		published = p2pdma->p2pmem_published;
	rcu_read_unlock();

	return sysfs_emit(buf, "%d\n", published);
}
static DEVICE_ATTR_RO(published);

static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj,
		const struct bin_attribute *attr, struct vm_area_struct *vma)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	size_t len = vma->vm_end - vma->vm_start;
	struct pci_p2pdma *p2pdma;
	struct percpu_ref *ref;
	unsigned long vaddr;
	void *kaddr;
	int ret;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		pci_info_ratelimited(pdev,
				     "%s: fail, attempted private mapping\n",
				     current->comm);
		return -EINVAL;
	}

	if (vma->vm_pgoff) {
		pci_info_ratelimited(pdev,
				     "%s: fail, attempted mapping with non-zero offset\n",
				     current->comm);
		return -EINVAL;
	}

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (!p2pdma) {
		ret = -ENODEV;
		goto out;
	}

	kaddr = (void *)gen_pool_alloc_owner(p2pdma->pool, len, (void **)&ref);
	if (!kaddr) {
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * vm_insert_page() can sleep, so take a reference on the mapping
	 * so that rcu_read_unlock() can be called before inserting the
	 * pages.
	 */
	if (unlikely(!percpu_ref_tryget_live_rcu(ref))) {
		ret = -ENODEV;
		goto out_free_mem;
	}
	rcu_read_unlock();

	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
		struct page *page = virt_to_page(kaddr);

		/*
		 * Initialise the refcount for the freshly allocated page. As
		 * we have just allocated the page, no one else should be
		 * using it.
		 */
		VM_WARN_ON_ONCE_PAGE(page_ref_count(page), page);
		set_page_count(page, 1);
		ret = vm_insert_page(vma, vaddr, page);
		if (ret) {
			gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len);

			/*
			 * Reset the page count. We don't use put_page()
			 * because we don't want to trigger the
			 * p2pdma_folio_free() path.
			 */
			set_page_count(page, 0);
			percpu_ref_put(ref);
			return ret;
		}
		percpu_ref_get(ref);
		put_page(page);
		kaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}

	percpu_ref_put(ref);

	return 0;
out_free_mem:
	gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len);
out:
	rcu_read_unlock();
	return ret;
}

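/*
 * Example (illustrative sketch, not part of this file): userspace can
 * obtain P2P memory by mmap()ing the "allocate" attribute defined below.
 * The device address 0000:01:00.0 is hypothetical; the mapping must be
 * MAP_SHARED with a zero offset, per the checks in p2pmem_alloc_mmap().
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/sys/bus/pci/devices/0000:01:00.0/p2pmem/allocate",
 *		      O_RDWR);
 *	void *mem = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	// mem now points at 2MB of the device's BAR memory; it can be
 *	// passed to subsystems that accept P2PDMA pages (e.g. O_DIRECT I/O)
 */
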
static const struct bin_attribute p2pmem_alloc_attr = {
	.attr = { .name = "allocate", .mode = 0660 },
	.mmap = p2pmem_alloc_mmap,
	/*
	 * Some places where we want to call mmap (e.g. Python) will check
	 * that the file size is greater than the mmap size before allowing
	 * the mmap to continue. To work around this, just set the size
	 * to be very large.
	 */
	.size = SZ_1T,
};

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct bin_attribute *const p2pmem_bin_attrs[] = {
	&p2pmem_alloc_attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.bin_attrs = p2pmem_bin_attrs,
	.name = "p2pmem",
};

static void p2pdma_folio_free(struct folio *folio)
{
	struct page *page = &folio->page;
	struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));
	/* safe to dereference while a reference is held to the percpu ref */
	struct pci_p2pdma *p2pdma = rcu_dereference_protected(
		to_pci_dev(pgmap->mem->owner)->p2pdma, 1);
	struct percpu_ref *ref;

	gen_pool_free_owner(p2pdma->pool, (uintptr_t)page_to_virt(page),
			    PAGE_SIZE, (void **)&ref);
	percpu_ref_put(ref);
}

static const struct dev_pagemap_ops p2pdma_pgmap_ops = {
	.folio_free = p2pdma_folio_free,
};

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;
	struct pci_p2pdma *p2pdma;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (!p2pdma)
		return;

	/* Flush and disable pci_alloc_p2pmem() */
	pdev->p2pdma = NULL;
	if (p2pdma->pool)
		synchronize_rcu();
	xa_destroy(&p2pdma->map_types);

	if (!p2pdma->pool)
		return;

	gen_pool_destroy(p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
}

/**
 * pcim_p2pdma_init - initialize peer-to-peer DMA providers
 * @pdev: The PCI device to enable P2PDMA for
 *
 * This function initializes the peer-to-peer DMA infrastructure
 * for a PCI device. It allocates and sets up the necessary data
 * structures to support P2PDMA operations, including mapping type
 * tracking.
 */
int pcim_p2pdma_init(struct pci_dev *pdev)
{
	struct pci_p2pdma *p2p;
	int i, ret;

	p2p = rcu_dereference_protected(pdev->p2pdma, 1);
	if (p2p)
		return 0;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	xa_init(&p2p->map_types);
	/*
	 * Iterate over all standard PCI BARs and record only those that
	 * correspond to MMIO regions. Skip non-memory resources (e.g. I/O
	 * port BARs) since they cannot be used for peer-to-peer (P2P)
	 * transactions.
	 */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;

		p2p->mem[i].owner = &pdev->dev;
		p2p->mem[i].bus_offset =
			pci_bus_address(pdev, i) - pci_resource_start(pdev, i);
	}

	ret = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (ret)
		goto out_p2p;

	rcu_assign_pointer(pdev->p2pdma, p2p);
	return 0;

out_p2p:
	devm_kfree(&pdev->dev, p2p);
	return ret;
}
EXPORT_SYMBOL_GPL(pcim_p2pdma_init);

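/*
 * Example (illustrative sketch): a driver that wants to export one of its
 * BARs for P2PDMA would typically initialize the infrastructure from its
 * probe() callback. The driver name "foo" is hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = pcim_enable_device(pdev);
 *		if (ret)
 *			return ret;
 *
 *		ret = pcim_p2pdma_init(pdev);
 *		if (ret)
 *			return ret;
 *
 *		// ... obtain a provider or add pool memory, see below ...
 *		return 0;
 *	}
 */
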
/**
 * pcim_p2pdma_provider - get the peer-to-peer DMA provider for a BAR
 * @pdev: The PCI device to enable P2PDMA for
 * @bar: BAR index to get the provider for
 *
 * This function gets the peer-to-peer DMA provider for a PCI device. The
 * lifetime of the provider (and of course the MMIO) is bound to the lifetime
 * of the driver. A driver calling this function must ensure that all
 * references to the provider, and any DMA mappings created for any MMIO, are
 * cleaned up before the driver's remove() completes.
 *
 * Since P2P memory is almost always shared with a second driver, this means
 * some mechanism to notify, invalidate and revoke the MMIO's DMA mappings
 * must be in place in order to use this function. For example, a revoke can
 * be built using DMABUF.
 */
struct p2pdma_provider *pcim_p2pdma_provider(struct pci_dev *pdev, int bar)
{
	struct pci_p2pdma *p2p;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return NULL;

	p2p = rcu_dereference_protected(pdev->p2pdma, 1);
	if (WARN_ON(!p2p))
		/* Someone forgot to call pcim_p2pdma_init() first */
		return NULL;

	return &p2p->mem[bar];
}
EXPORT_SYMBOL_GPL(pcim_p2pdma_provider);

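/*
 * Example (illustrative sketch): after pcim_p2pdma_init(), a driver can
 * look up the provider for a BAR and translate a CPU physical address
 * within that BAR into the address peers must use on the PCI bus. The
 * helper name foo_bar_to_bus() is hypothetical.
 *
 *	static pci_bus_addr_t foo_bar_to_bus(struct pci_dev *pdev, int bar,
 *					     u64 offset)
 *	{
 *		struct p2pdma_provider *mem;
 *
 *		mem = pcim_p2pdma_provider(pdev, bar);
 *		if (!mem)
 *			return 0;
 *
 *		// bus_offset = bus address - CPU resource start, as
 *		// recorded in pcim_p2pdma_init()
 *		return pci_resource_start(pdev, bar) + offset +
 *		       mem->bus_offset;
 *	}
 */
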
static int pci_p2pdma_setup_pool(struct pci_dev *pdev)
{
	struct pci_p2pdma *p2pdma;
	int ret;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (p2pdma->pool)
		/* The pool is already set up, nothing to do. */
		return 0;

	p2pdma->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2pdma->pool)
		return -ENOMEM;

	ret = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (ret)
		goto out_pool_destroy;

	return 0;

out_pool_destroy:
	gen_pool_destroy(p2pdma->pool);
	p2pdma->pool = NULL;
	return ret;
}

static void pci_p2pdma_unmap_mappings(void *data)
{
	struct pci_p2pdma_pagemap *p2p_pgmap = data;

	/*
	 * Removing the alloc attribute from sysfs will call
	 * unmap_mapping_range() on the inode, tear down any existing
	 * userspace mappings and prevent new ones from being created.
	 */
	sysfs_remove_file_from_group(&p2p_pgmap->mem->owner->kobj,
				     &p2pmem_alloc_attr.attr,
				     p2pmem_group.name);
}

/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 */
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct pci_p2pdma_pagemap *p2p_pgmap;
	struct p2pdma_provider *mem;
	struct dev_pagemap *pgmap;
	struct pci_p2pdma *p2pdma;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	error = pcim_p2pdma_init(pdev);
	if (error)
		return error;

	error = pci_p2pdma_setup_pool(pdev);
	if (error)
		return error;

	mem = pcim_p2pdma_provider(pdev, bar);
	/*
	 * We validated the BAR before calling pcim_p2pdma_provider(),
	 * so it should never return NULL.
	 */
	if (WARN_ON(!mem))
		return -EINVAL;

	p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
	if (!p2p_pgmap)
		return -ENOMEM;

	pgmap = &p2p_pgmap->pgmap;
	pgmap->range.start = pci_resource_start(pdev, bar) + offset;
	pgmap->range.end = pgmap->range.start + size - 1;
	pgmap->nr_range = 1;
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
	pgmap->ops = &p2pdma_pgmap_ops;
	p2p_pgmap->mem = mem;

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_unmap_mappings,
					 p2p_pgmap);
	if (error)
		goto pages_free;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr,
			pci_bus_address(pdev, bar) + offset,
			range_len(&pgmap->range), dev_to_node(&pdev->dev),
			&pgmap->ref);
	if (error)
		goto pages_free;

	pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
		 pgmap->range.start, pgmap->range.end);

	return 0;

pages_free:
	devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
	devm_kfree(&pdev->dev, p2p_pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);

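/*
 * Example (illustrative sketch): a device with a 1MB region of usable
 * memory in BAR 4 could register it for P2PDMA and publish it from its
 * probe() path. "foo" and the BAR layout are hypothetical.
 *
 *	static int foo_setup_p2pmem(struct pci_dev *pdev)
 *	{
 *		int ret;
 *
 *		ret = pci_p2pdma_add_resource(pdev, 4, SZ_1M, 0);
 *		if (ret)
 *			return ret;
 *
 *		// allow other drivers to find this memory via
 *		// pci_p2pmem_find_many()
 *		pci_p2pmem_publish(pdev, true);
 *		return 0;
 *	}
 */
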
/*
 * Note this function returns the parent PCI device with a
 * reference taken. It is the caller's responsibility to drop
 * the reference.
 */
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}

/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pdev->acs_cap;
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

static bool cpu_supports_p2pdma(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* Any AMD CPU whose family ID is Zen or newer supports p2pdma */
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17)
		return true;
#endif

	return false;
}

static const struct pci_p2pdma_whitelist_entry {
	unsigned short vendor;
	int device;
	enum {
		REQ_SAME_HOST_BRIDGE	= 1 << 0,
	} flags;
} pci_p2pdma_whitelist[] = {
	/* Intel Xeon E5/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x3c00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x3c01, REQ_SAME_HOST_BRIDGE},
	/* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x2f00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x2f01, REQ_SAME_HOST_BRIDGE},
	/* Intel Skylake-E */
	{PCI_VENDOR_ID_INTEL,	0x2030, 0},
	{PCI_VENDOR_ID_INTEL,	0x2031, 0},
	{PCI_VENDOR_ID_INTEL,	0x2032, 0},
	{PCI_VENDOR_ID_INTEL,	0x2033, 0},
	{PCI_VENDOR_ID_INTEL,	0x2020, 0},
	{PCI_VENDOR_ID_INTEL,	0x09a2, 0},
	/* Google SoCs. */
	{PCI_VENDOR_ID_GOOGLE,	PCI_ANY_ID, 0},
	{}
};

/*
 * If the first device on the host's root bus is either devfn 00.0 or a
 * PCIe Root Port, return it.  Otherwise return NULL.
 *
 * We often use a devfn 00.0 "host bridge" in the pci_p2pdma_whitelist[]
 * (though there is no PCI/PCIe requirement for such a device).  On some
 * platforms, e.g., Intel Skylake, there is no such host bridge device, and
 * pci_p2pdma_whitelist[] may contain a Root Port at any devfn.
 *
 * This function is similar to pci_get_slot(host->bus, 0), but it does
 * not take the pci_bus_sem lock since __host_bridge_whitelist() must not
 * sleep.
 *
 * For this to be safe, the caller should hold a reference to a device on the
 * bridge, which should ensure the host_bridge device will not be freed
 * or removed from the head of the devices list.
 */
static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host)
{
	struct pci_dev *root;

	root = list_first_entry_or_null(&host->bus->devices,
					struct pci_dev, bus_list);

	if (!root)
		return NULL;

	if (root->devfn == PCI_DEVFN(0, 0))
		return root;

	if (pci_pcie_type(root) == PCI_EXP_TYPE_ROOT_PORT)
		return root;

	return NULL;
}

static bool __host_bridge_whitelist(struct pci_host_bridge *host,
				    bool same_host_bridge, bool warn)
{
	struct pci_dev *root = pci_host_bridge_dev(host);
	const struct pci_p2pdma_whitelist_entry *entry;
	unsigned short vendor, device;

	if (!root)
		return false;

	vendor = root->vendor;
	device = root->device;

	for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
		if (vendor != entry->vendor)
			continue;

		if (entry->device != PCI_ANY_ID && device != entry->device)
			continue;

		if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge)
			return false;

		return true;
	}

	if (warn)
		pci_warn(root, "Host bridge not in P2PDMA whitelist: %04x:%04x\n",
			 vendor, device);

	return false;
}

/*
 * If we can't find a common upstream bridge take a look at the root
 * complex and compare it to a whitelist of known good hardware.
 */
static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b,
				  bool warn)
{
	struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus);
	struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus);

	if (host_a == host_b)
		return __host_bridge_whitelist(host_a, true, warn);

	if (__host_bridge_whitelist(host_a, false, warn) &&
	    __host_bridge_whitelist(host_b, false, warn))
		return true;

	return false;
}

static unsigned long map_types_idx(struct pci_dev *client)
{
	return (pci_domain_nr(client->bus) << 16) | pci_dev_id(client);
}

651  * Calculate the P2PDMA mapping type and distance between two PCI devices.
652  *
653  * If the two devices are the same PCI function, return
654  * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 0.
655  *
656  * If they are two functions of the same device, return
657  * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 2 (one hop up to the bridge,
658  * then one hop back down to another function of the same device).
659  *
660  * In the case where two devices are connected to the same PCIe switch,
661  * return a distance of 4. This corresponds to the following PCI tree:
662  *
663  *     -+  Root Port
664  *      \+ Switch Upstream Port
665  *       +-+ Switch Downstream Port 0
666  *       + \- Device A
667  *       \-+ Switch Downstream Port 1
668  *         \- Device B
669  *
670  * The distance is 4 because we traverse from Device A to Downstream Port 0
671  * to the common Switch Upstream Port, back down to Downstream Port 1 and
672  * then to Device B. The mapping type returned depends on the ACS
673  * redirection setting of the ports along the path.
674  *
675  * If ACS redirect is set on any port in the path, traffic between the
676  * devices will go through the host bridge, so return
677  * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; otherwise return
678  * PCI_P2PDMA_MAP_BUS_ADDR.
679  *
 * For any two devices whose data path goes through the host bridge, the
 * host bridge is checked against a whitelist. If it is in the whitelist,
 * return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE with the distance set to the
 * number of ports per above. If it is not in the whitelist, return
 * PCI_P2PDMA_MAP_NOT_SUPPORTED.
 */
static enum pci_p2pdma_map_type
calc_map_type_and_dist(struct pci_dev *provider, struct pci_dev *client,
		int *dist, bool verbose)
{
	enum pci_p2pdma_map_type map_type = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
	struct pci_dev *a = provider, *b = client, *bb;
	bool acs_redirects = false;
	struct pci_p2pdma *p2pdma;
	struct seq_buf acs_list;
	int acs_cnt = 0;
	int dist_a = 0;
	int dist_b = 0;
	char buf[128];

	seq_buf_init(&acs_list, buf, sizeof(buf));

	/*
	 * Note, we don't need to take references to devices returned by
	 * pci_upstream_bridge() seeing we hold a reference to a child
	 * device which will already hold a reference to the upstream bridge.
	 */
	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(&acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	*dist = dist_a + dist_b;
	goto map_through_host_bridge;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(&acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	*dist = dist_a + dist_b;

	if (!acs_cnt) {
		map_type = PCI_P2PDMA_MAP_BUS_ADDR;
		goto done;
	}

	if (verbose) {
		acs_list.buffer[acs_list.len-1] = 0; /* drop final semicolon */
		pci_warn(client, "ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);
	}
	acs_redirects = true;

map_through_host_bridge:
	if (!cpu_supports_p2pdma() &&
	    !host_bridge_whitelist(provider, client, acs_redirects)) {
		if (verbose)
			pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
				 pci_name(provider));
		map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	}
done:
	rcu_read_lock();
	p2pdma = rcu_dereference(provider->p2pdma);
	if (p2pdma)
		xa_store(&p2pdma->map_types, map_types_idx(client),
			 xa_mk_value(map_type), GFP_ATOMIC);
	rcu_read_unlock();
	return map_type;
}

/**
 * pci_p2pdma_distance_many - Determine the cumulative distance between
 *	a p2pdma provider and the clients in use.
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible, otherwise returns a
 * non-negative number where a lower number is the preferable choice. (If
 * there's one client that's the same as the provider it will return 0,
 * which is the best choice).
 *
 * "compatible" means the provider and the clients are either all behind
 * the same PCI root port or the host bridges connected to each of the devices
 * are listed in the 'pci_p2pdma_whitelist'.
 */
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	enum pci_p2pdma_map_type map;
	bool not_supported = false;
	struct pci_dev *pci_client;
	int total_dist = 0;
	int i, distance;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		map = calc_map_type_and_dist(provider, pci_client, &distance,
					     verbose);

		pci_dev_put(pci_client);

		if (map == PCI_P2PDMA_MAP_NOT_SUPPORTED)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		total_dist += distance;
	}

	if (not_supported)
		return -1;

	return total_dist;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);

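/*
 * Example (illustrative sketch): an orchestrating driver with two client
 * devices can ask how "far" a candidate provider is from them before
 * committing to it. The variables below are hypothetical.
 *
 *	struct device *clients[] = { &nvme_dev->dev, &rdma_dev->dev, NULL };
 *	int dist;
 *
 *	dist = pci_p2pdma_distance_many(provider_pdev, clients, 2, true);
 *	if (dist < 0)
 *		// at least one client cannot do P2PDMA with this provider
 *		return -EINVAL;
 *	// lower distances (e.g. 4 through a common switch) are preferable
 */
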
/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */
static bool pci_has_p2pmem(struct pci_dev *pdev)
{
	struct pci_p2pdma *p2pdma;
	bool res;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	res = p2pdma && p2pdma->p2pmem_published;
	rcu_read_unlock();

	return res;
}

/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
 *	the specified list of clients and shortest distance
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other providers that are unrelated). If multiple providers are an equal
 * distance away, one will be chosen at random.
 *
 * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
 * to return the reference) or NULL if no compatible device is found. The
 * found provider will also be assigned to the client list.
 */
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	for_each_pci_dev(pdev) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[get_random_u32_below(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);

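/*
 * Example (illustrative sketch): picking the closest published provider
 * for a set of clients. Variable names are hypothetical.
 *
 *	struct device *clients[] = { &nvme_dev->dev, &rdma_dev->dev, NULL };
 *	struct pci_dev *p2p_dev;
 *
 *	p2p_dev = pci_p2pmem_find_many(clients, 2);
 *	if (!p2p_dev)
 *		return -ENODEV;
 *
 *	// ... allocate from p2p_dev with pci_alloc_p2pmem() ...
 *
 *	pci_dev_put(p2p_dev);	// drop the reference when done
 */
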
/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 */
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret = NULL;
	struct percpu_ref *ref;
	struct pci_p2pdma *p2pdma;

	/*
	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
	 * ensure pdev->p2pdma is non-NULL for the duration of the
	 * read-lock.
	 */
	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (unlikely(!p2pdma))
		goto out;

	ret = (void *)gen_pool_alloc_owner(p2pdma->pool, size, (void **) &ref);
	if (!ret)
		goto out;

	if (unlikely(!percpu_ref_tryget_live_rcu(ref))) {
		gen_pool_free(p2pdma->pool, (unsigned long) ret, size);
		ret = NULL;
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);

/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	struct percpu_ref *ref;
	struct pci_p2pdma *p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);

	gen_pool_free_owner(p2pdma->pool, (uintptr_t)addr, size,
			(void **) &ref);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);

/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	struct pci_p2pdma *p2pdma;

	if (!addr)
		return 0;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (!p2pdma)
		return 0;

	/*
	 * Note: when we added the memory to the pool we used the PCI
	 * bus address as the physical address. So gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */
	return gen_pool_virt_to_phys(p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);

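/*
 * Example (illustrative sketch): allocating a buffer from a provider,
 * getting the bus address a peer device should DMA to, and freeing it.
 * foo_do_p2p_io() is a hypothetical name.
 *
 *	static int foo_do_p2p_io(struct pci_dev *p2p_dev, size_t len)
 *	{
 *		pci_bus_addr_t bus_addr;
 *		void *buf;
 *
 *		buf = pci_alloc_p2pmem(p2p_dev, len);
 *		if (!buf)
 *			return -ENOMEM;
 *
 *		bus_addr = pci_p2pmem_virt_to_bus(p2p_dev, buf);
 *		// program a peer device to DMA to/from bus_addr ...
 *
 *		pci_free_p2pmem(p2p_dev, buf, len);
 *		return 0;
 *	}
 */
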
/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Return: %NULL on error or &struct scatterlist pointer and @nents on success
 */
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);

/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
 * @pdev: the device the memory was allocated from
 * @sgl: the allocated scatterlist
 */
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);

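/*
 * Example (illustrative sketch): the scatterlist helpers bundle the
 * allocation into a single-entry SGL, which suits APIs that consume
 * scatterlists directly. Names are hypothetical.
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = pci_p2pmem_alloc_sgl(p2p_dev, &nents, SZ_64K);
 *	if (!sgl)
 *		return -ENOMEM;
 *
 *	// ... hand (sgl, nents) to a subsystem that maps it for DMA ...
 *
 *	pci_p2pmem_free_sgl(p2p_dev, sgl);
 */
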
/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-to-peer DMA operations. Non-published memory is reserved for
 * exclusive use of the device driver that registers the peer-to-peer
 * memory.
 */
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	struct pci_p2pdma *p2pdma;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		p2pdma->p2pmem_published = publish;
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);

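/*
 * Example (illustrative sketch): a provider driver typically publishes its
 * memory right after registering it (see the pci_p2pdma_add_resource()
 * example above) and can unpublish it before tearing down. foo_remove()
 * is a hypothetical name.
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		// stop advertising the memory to pci_p2pmem_find_many()
 *		pci_p2pmem_publish(pdev, false);
 *		// devm actions registered earlier clean up the pool/pages
 *	}
 */
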
/**
 * pci_p2pdma_map_type - Determine the mapping type for P2PDMA transfers
 * @provider: P2PDMA provider structure
 * @dev: Target device for the transfer
 *
 * Determines how peer-to-peer DMA transfers should be mapped between
 * the provider and the target device. The mapping type indicates whether
 * the transfer can be done directly through PCI switches or must go
 * through the host bridge.
 */
enum pci_p2pdma_map_type pci_p2pdma_map_type(struct p2pdma_provider *provider,
					     struct device *dev)
{
	enum pci_p2pdma_map_type type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	struct pci_dev *pdev = to_pci_dev(provider->owner);
	struct pci_dev *client;
	struct pci_p2pdma *p2pdma;
	int dist;

	if (!pdev->p2pdma)
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	if (!dev_is_pci(dev))
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	client = to_pci_dev(dev);

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);

	if (p2pdma)
		type = xa_to_value(xa_load(&p2pdma->map_types,
					   map_types_idx(client)));
	rcu_read_unlock();

	if (type == PCI_P2PDMA_MAP_UNKNOWN)
		return calc_map_type_and_dist(pdev, client, &dist, true);

	return type;
}

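/*
 * Example (illustrative sketch): a DMA consumer can use the cached mapping
 * type to decide between programming the raw PCI bus address and going
 * through the normal IOMMU/host bridge path. Names are hypothetical.
 *
 *	switch (pci_p2pdma_map_type(provider, &client_pdev->dev)) {
 *	case PCI_P2PDMA_MAP_BUS_ADDR:
 *		// peers talk directly; use the bus address
 *		break;
 *	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
 *		// traffic bounces through the host bridge; map normally
 *		break;
 *	default:
 *		return -EOPNOTSUPP;	// P2PDMA not possible for this pair
 *	}
 */
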
void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
		struct device *dev, struct page *page)
{
	struct pci_p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(page_pgmap(page));

	if (state->mem == p2p_pgmap->mem)
		return;

	state->mem = p2p_pgmap->mem;
	state->map = pci_p2pdma_map_type(p2p_pgmap->mem, dev);
}

/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *		to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *		(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format kstrtobool() accepts). A false
 * value disables p2pdma; a true value tells the caller to
 * automatically find a compatible device; and a PCI device name tells
 * the caller to use that specific provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success
 */
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
		/*
		 * If the user enters a PCI device that doesn't exist
		 * like "0000:01:00.1", we don't want kstrtobool to think
		 * it's a '0' when it's clearly not what the user wanted.
		 * So we require 0's and 1's to be exactly one character.
		 */
	} else if (!kstrtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);

/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *		whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
 *
 * Returns the number of bytes written to @page
 */
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);

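/*
 * Example (illustrative sketch): wiring these two helpers into a configfs
 * attribute pair, similar in spirit to how the NVMe target exposes its
 * p2pmem attribute. The foo_port container and to_foo_port() helper are
 * hypothetical.
 *
 *	static ssize_t foo_p2pmem_store(struct config_item *item,
 *					const char *page, size_t count)
 *	{
 *		struct foo_port *port = to_foo_port(item);
 *		int ret;
 *
 *		ret = pci_p2pdma_enable_store(page, &port->p2p_dev,
 *					      &port->use_p2pmem);
 *		return ret ? ret : count;
 *	}
 *
 *	static ssize_t foo_p2pmem_show(struct config_item *item, char *page)
 *	{
 *		struct foo_port *port = to_foo_port(item);
 *
 *		return pci_p2pdma_enable_show(page, port->p2p_dev,
 *					      port->use_p2pmem);
 *	}
 */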