// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
#include <linux/dma-map-ops.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
#include <linux/xarray.h>

struct pci_p2pdma {
	struct gen_pool *pool;
	bool p2pmem_published;
	struct xarray map_types;
	struct p2pdma_provider mem[PCI_STD_NUM_BARS];
};

struct pci_p2pdma_pagemap {
	struct dev_pagemap pgmap;
	struct p2pdma_provider *mem;
};

static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
{
	return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	size_t size = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		size = gen_pool_size(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	size_t avail = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		avail = gen_pool_avail(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	bool published = false;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		published = p2pdma->p2pmem_published;
	rcu_read_unlock();

	return sysfs_emit(buf, "%d\n", published);
}
static DEVICE_ATTR_RO(published);

static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj,
		const struct bin_attribute *attr, struct vm_area_struct *vma)
{
	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
	size_t len = vma->vm_end - vma->vm_start;
	struct pci_p2pdma *p2pdma;
	struct percpu_ref *ref;
	unsigned long vaddr;
	void *kaddr;
	int ret;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		pci_info_ratelimited(pdev,
				     "%s: fail, attempted private mapping\n",
				     current->comm);
		return -EINVAL;
	}

	if (vma->vm_pgoff) {
		pci_info_ratelimited(pdev,
				     "%s: fail, attempted mapping with non-zero offset\n",
				     current->comm);
		return -EINVAL;
	}

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (!p2pdma) {
		ret = -ENODEV;
		goto out;
	}

	kaddr = (void *)gen_pool_alloc_owner(p2pdma->pool, len, (void **)&ref);
	if (!kaddr) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * vm_insert_page() can sleep, so a reference is taken to the mapping
	 * such that rcu_read_unlock() can be done before inserting the
	 * pages
	 */
	if (unlikely(!percpu_ref_tryget_live_rcu(ref))) {
		ret = -ENODEV;
		goto out_free_mem;
	}
	rcu_read_unlock();

	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
		struct page *page = virt_to_page(kaddr);

		/*
		 * Initialise the refcount for the freshly allocated page. As
		 * we have just allocated the page no one else should be
		 * using it.
		 */
		VM_WARN_ON_ONCE_PAGE(page_ref_count(page), page);
		set_page_count(page, 1);
		ret = vm_insert_page(vma, vaddr, page);
		if (ret) {
			gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len);

			/*
			 * Reset the page count. We don't use put_page()
			 * because we don't want to trigger the
			 * p2pdma_folio_free() path.
			 */
			set_page_count(page, 0);
			percpu_ref_put(ref);
			return ret;
		}
		percpu_ref_get(ref);
		put_page(page);
		kaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}

	percpu_ref_put(ref);

	return 0;
out_free_mem:
	gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len);
out:
	rcu_read_unlock();
	return ret;
}

static const struct bin_attribute p2pmem_alloc_attr = {
	.attr = { .name = "allocate", .mode = 0660 },
	.mmap = p2pmem_alloc_mmap,
	/*
	 * Some places where we want to call mmap (e.g. Python) will check
	 * that the file size is greater than the mmap size before allowing
	 * the mmap to continue. To work around this, just set the size
	 * to be very large.
	 */
	.size = SZ_1T,
};

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct bin_attribute *const p2pmem_bin_attrs[] = {
	&p2pmem_alloc_attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.bin_attrs = p2pmem_bin_attrs,
	.name = "p2pmem",
};
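
/*
 * Illustrative sketch (not part of the driver): userspace can obtain P2P
 * memory by mmap()ing the "allocate" attribute above. The sysfs path and
 * device BDF below are hypothetical. The mapping must be MAP_SHARED with a
 * zero offset, per the checks in p2pmem_alloc_mmap():
 *
 *	int fd = open("/sys/bus/pci/devices/0000:01:00.0/p2pmem/allocate",
 *		      O_RDWR);
 *	void *p2p_buf = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED, fd, 0);
 *	...
 *	munmap(p2p_buf, 2 * 1024 * 1024);
 *	close(fd);
 */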

static void p2pdma_folio_free(struct folio *folio)
{
	struct page *page = &folio->page;
	struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));
	/* safe to dereference while a reference is held to the percpu ref */
	struct pci_p2pdma *p2pdma = rcu_dereference_protected(
		to_pci_dev(pgmap->mem->owner)->p2pdma, 1);
	struct percpu_ref *ref;

	gen_pool_free_owner(p2pdma->pool, (uintptr_t)page_to_virt(page),
			    PAGE_SIZE, (void **)&ref);
	percpu_ref_put(ref);
}

static const struct dev_pagemap_ops p2pdma_pgmap_ops = {
	.folio_free = p2pdma_folio_free,
};

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;
	struct pci_p2pdma *p2pdma;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (!p2pdma)
		return;

	/* Flush and disable pci_alloc_p2p_mem() */
	pdev->p2pdma = NULL;
	if (p2pdma->pool)
		synchronize_rcu();
	xa_destroy(&p2pdma->map_types);

	if (!p2pdma->pool)
		return;

	gen_pool_destroy(p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
}

/**
 * pcim_p2pdma_init - initialize peer-to-peer DMA providers
 * @pdev: The PCI device to enable P2PDMA for
 *
 * This function initializes the peer-to-peer DMA infrastructure
 * for a PCI device. It allocates and sets up the necessary data
 * structures to support P2PDMA operations, including mapping type
 * tracking.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */
int pcim_p2pdma_init(struct pci_dev *pdev)
{
	struct pci_p2pdma *p2p;
	int i, ret;

	p2p = rcu_dereference_protected(pdev->p2pdma, 1);
	if (p2p)
		return 0;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	xa_init(&p2p->map_types);
	/*
	 * Iterate over all standard PCI BARs and record only those that
	 * correspond to MMIO regions. Skip non-memory resources (e.g. I/O
	 * port BARs) since they cannot be used for peer-to-peer (P2P)
	 * transactions.
	 */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;

		p2p->mem[i].owner = &pdev->dev;
		p2p->mem[i].bus_offset =
			pci_bus_address(pdev, i) - pci_resource_start(pdev, i);
	}

	ret = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (ret)
		goto out_p2p;

	rcu_assign_pointer(pdev->p2pdma, p2p);
	return 0;

out_p2p:
	devm_kfree(&pdev->dev, p2p);
	return ret;
}
EXPORT_SYMBOL_GPL(pcim_p2pdma_init);

/**
 * pcim_p2pdma_provider - get a peer-to-peer DMA provider
 * @pdev: The PCI device to enable P2PDMA for
 * @bar: BAR index to get the provider for
 *
 * This function gets the peer-to-peer DMA provider for a PCI device. The
 * lifetime of the provider (and of course the MMIO) is bound to the lifetime
 * of the driver. A driver calling this function must ensure that all
 * references to the provider, and any DMA mappings created for any MMIO, are
 * cleaned up before the driver's remove() completes.
 *
 * Since P2P memory is almost always shared with a second driver, some
 * mechanism to notify, invalidate and revoke the MMIO's DMA mappings must be
 * in place in order to use this function. For example, a revoke can be built
 * using DMABUF.
 */
struct p2pdma_provider *pcim_p2pdma_provider(struct pci_dev *pdev, int bar)
{
	struct pci_p2pdma *p2p;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return NULL;

	p2p = rcu_dereference_protected(pdev->p2pdma, 1);
	if (WARN_ON(!p2p))
		/* Someone forgot to call pcim_p2pdma_init() first */
		return NULL;

	return &p2p->mem[bar];
}
EXPORT_SYMBOL_GPL(pcim_p2pdma_provider);
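
/*
 * Illustrative sketch (not part of this file): a typical provider driver
 * calls pcim_p2pdma_init() once at probe time and then hands the per-BAR
 * provider to whatever subsystem exports the MMIO. The driver name and
 * BAR number below are hypothetical:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		struct p2pdma_provider *provider;
 *		int ret;
 *
 *		ret = pcim_p2pdma_init(pdev);
 *		if (ret)
 *			return ret;
 *
 *		provider = pcim_p2pdma_provider(pdev, 0);
 *		if (!provider)
 *			return -EINVAL;
 *		...
 *	}
 */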

static int pci_p2pdma_setup_pool(struct pci_dev *pdev)
{
	struct pci_p2pdma *p2pdma;
	int ret;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (p2pdma->pool)
		/* We already set up the pool; do nothing */
		return 0;

	p2pdma->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2pdma->pool)
		return -ENOMEM;

	ret = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (ret)
		goto out_pool_destroy;

	return 0;

out_pool_destroy:
	gen_pool_destroy(p2pdma->pool);
	p2pdma->pool = NULL;
	return ret;
}

static void pci_p2pdma_unmap_mappings(void *data)
{
	struct pci_p2pdma_pagemap *p2p_pgmap = data;

	/*
	 * Removing the alloc attribute from sysfs will call
	 * unmap_mapping_range() on the inode, teardown any existing userspace
	 * mappings and prevent new ones from being created.
	 */
	sysfs_remove_file_from_group(&p2p_pgmap->mem->owner->kobj,
				     &p2pmem_alloc_attr.attr,
				     p2pmem_group.name);
}

/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 */
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct pci_p2pdma_pagemap *p2p_pgmap;
	struct p2pdma_provider *mem;
	struct dev_pagemap *pgmap;
	struct pci_p2pdma *p2pdma;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	error = pcim_p2pdma_init(pdev);
	if (error)
		return error;

	error = pci_p2pdma_setup_pool(pdev);
	if (error)
		return error;

	mem = pcim_p2pdma_provider(pdev, bar);
	/*
	 * We checked the validity of the BAR prior to calling
	 * pcim_p2pdma_provider(), so it should never return NULL.
	 */
	if (WARN_ON(!mem))
		return -EINVAL;

	p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
	if (!p2p_pgmap)
		return -ENOMEM;

	pgmap = &p2p_pgmap->pgmap;
	pgmap->range.start = pci_resource_start(pdev, bar) + offset;
	pgmap->range.end = pgmap->range.start + size - 1;
	pgmap->nr_range = 1;
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
	pgmap->ops = &p2pdma_pgmap_ops;
	p2p_pgmap->mem = mem;

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_unmap_mappings,
					 p2p_pgmap);
	if (error)
		goto pages_free;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr,
				   pci_bus_address(pdev, bar) + offset,
				   range_len(&pgmap->range),
				   dev_to_node(&pdev->dev), &pgmap->ref);
	if (error)
		goto pages_free;

	pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
		 pgmap->range.start, pgmap->range.end);

	return 0;

pages_free:
	devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
	devm_kfree(&pdev->dev, p2p_pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
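
/*
 * Illustrative sketch (not part of this file): a device with a CMB-like
 * memory BAR can register it as p2pmem at probe time and optionally publish
 * it for use by other drivers. The BAR number and size below are
 * hypothetical:
 *
 *	ret = pci_p2pdma_add_resource(pdev, 4, SZ_1M, 0);
 *	if (ret)
 *		return ret;
 *	pci_p2pmem_publish(pdev, true);
 */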

/*
 * Note this function returns the parent PCI device with a
 * reference taken. It is the caller's responsibility to drop
 * the reference.
 */
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}

/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pdev->acs_cap;
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

static bool cpu_supports_p2pdma(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* Any AMD CPU from family 0x17 (Zen) onwards supports p2pdma */
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17)
		return true;
#endif

	return false;
}

static const struct pci_p2pdma_whitelist_entry {
	unsigned short vendor;
	unsigned short device;
	enum {
		REQ_SAME_HOST_BRIDGE = 1 << 0,
	} flags;
} pci_p2pdma_whitelist[] = {
	/* Intel Xeon E5/Core i7 */
	{PCI_VENDOR_ID_INTEL, 0x3c00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL, 0x3c01, REQ_SAME_HOST_BRIDGE},
	/* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
	{PCI_VENDOR_ID_INTEL, 0x2f00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL, 0x2f01, REQ_SAME_HOST_BRIDGE},
	/* Intel Skylake-E */
	{PCI_VENDOR_ID_INTEL, 0x2030, 0},
	{PCI_VENDOR_ID_INTEL, 0x2031, 0},
	{PCI_VENDOR_ID_INTEL, 0x2032, 0},
	{PCI_VENDOR_ID_INTEL, 0x2033, 0},
	{PCI_VENDOR_ID_INTEL, 0x2020, 0},
	{PCI_VENDOR_ID_INTEL, 0x09a2, 0},
	{}
};

/*
 * If the first device on host's root bus is either devfn 00.0 or a PCIe
 * Root Port, return it. Otherwise return NULL.
 *
 * We often use a devfn 00.0 "host bridge" in the pci_p2pdma_whitelist[]
 * (though there is no PCI/PCIe requirement for such a device). On some
 * platforms, e.g., Intel Skylake, there is no such host bridge device, and
 * pci_p2pdma_whitelist[] may contain a Root Port at any devfn.
 *
 * This function is similar to pci_get_slot(host->bus, 0), but it does
 * not take the pci_bus_sem lock since __host_bridge_whitelist() must not
 * sleep.
 *
 * For this to be safe, the caller should hold a reference to a device on the
 * bridge, which should ensure the host_bridge device will not be freed
 * or removed from the head of the devices list.
 */
static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host)
{
	struct pci_dev *root;

	root = list_first_entry_or_null(&host->bus->devices,
					struct pci_dev, bus_list);

	if (!root)
		return NULL;

	if (root->devfn == PCI_DEVFN(0, 0))
		return root;

	if (pci_pcie_type(root) == PCI_EXP_TYPE_ROOT_PORT)
		return root;

	return NULL;
}

static bool __host_bridge_whitelist(struct pci_host_bridge *host,
				    bool same_host_bridge, bool warn)
{
	struct pci_dev *root = pci_host_bridge_dev(host);
	const struct pci_p2pdma_whitelist_entry *entry;
	unsigned short vendor, device;

	if (!root)
		return false;

	vendor = root->vendor;
	device = root->device;

	for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
		if (vendor != entry->vendor || device != entry->device)
			continue;
		if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge)
			return false;

		return true;
	}

	if (warn)
		pci_warn(root, "Host bridge not in P2PDMA whitelist: %04x:%04x\n",
			 vendor, device);

	return false;
}

/*
 * If we can't find a common upstream bridge take a look at the root
 * complex and compare it to a whitelist of known good hardware.
 */
static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b,
				  bool warn)
{
	struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus);
	struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus);

	if (host_a == host_b)
		return __host_bridge_whitelist(host_a, true, warn);

	if (__host_bridge_whitelist(host_a, false, warn) &&
	    __host_bridge_whitelist(host_b, false, warn))
		return true;

	return false;
}

static unsigned long map_types_idx(struct pci_dev *client)
{
	return (pci_domain_nr(client->bus) << 16) | pci_dev_id(client);
}
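
/*
 * Worked example for map_types_idx() (values hypothetical): a client at
 * domain 0x0001, bus 0x02, device 0x03, function 0 has
 * pci_dev_id() == (0x02 << 8) | PCI_DEVFN(3, 0) == 0x0218, so the xarray
 * index is (0x0001 << 16) | 0x0218 == 0x10218.
 */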

/*
 * Calculate the P2PDMA mapping type and distance between two PCI devices.
 *
 * If the two devices are the same PCI function, return
 * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 0.
 *
 * If they are two functions of the same device, return
 * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 2 (one hop up to the bridge,
 * then one hop back down to another function of the same device).
 *
 * In the case where two devices are connected to the same PCIe switch,
 * return a distance of 4. This corresponds to the following PCI tree:
 *
 *     -+  Root Port
 *      \+ Switch Upstream Port
 *       +-+ Switch Downstream Port 0
 *       + \- Device A
 *       \-+ Switch Downstream Port 1
 *         \- Device B
 *
 * The distance is 4 because we traverse from Device A to Downstream Port 0
 * to the common Switch Upstream Port, back down to Downstream Port 1 and
 * then to Device B. The mapping type returned depends on the ACS
 * redirection setting of the ports along the path.
 *
 * If ACS redirect is set on any port in the path, traffic between the
 * devices will go through the host bridge, so return
 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; otherwise return
 * PCI_P2PDMA_MAP_BUS_ADDR.
 *
 * Any two devices that have a data path that goes through the host bridge
 * will consult a whitelist. If the host bridge is in the whitelist, return
 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE with the distance set to the number of
 * ports per above. If the host bridge is not in the whitelist, return
 * PCI_P2PDMA_MAP_NOT_SUPPORTED.
 */
static enum pci_p2pdma_map_type
calc_map_type_and_dist(struct pci_dev *provider, struct pci_dev *client,
		       int *dist, bool verbose)
{
	enum pci_p2pdma_map_type map_type = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
	struct pci_dev *a = provider, *b = client, *bb;
	bool acs_redirects = false;
	struct pci_p2pdma *p2pdma;
	struct seq_buf acs_list;
	int acs_cnt = 0;
	int dist_a = 0;
	int dist_b = 0;
	char buf[128];

	seq_buf_init(&acs_list, buf, sizeof(buf));

	/*
	 * Note, we don't need to take references to devices returned by
	 * pci_upstream_bridge() seeing we hold a reference to a child
	 * device which will already hold a reference to the upstream bridge.
	 */
	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(&acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	*dist = dist_a + dist_b;
	goto map_through_host_bridge;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(&acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	*dist = dist_a + dist_b;

	if (!acs_cnt) {
		map_type = PCI_P2PDMA_MAP_BUS_ADDR;
		goto done;
	}

	if (verbose) {
		acs_list.buffer[acs_list.len - 1] = 0; /* drop final semicolon */
		pci_warn(client, "ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);
	}
	acs_redirects = true;

map_through_host_bridge:
	if (!cpu_supports_p2pdma() &&
	    !host_bridge_whitelist(provider, client, acs_redirects)) {
		if (verbose)
			pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
				 pci_name(provider));
		map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	}
done:
	rcu_read_lock();
	p2pdma = rcu_dereference(provider->p2pdma);
	if (p2pdma)
		xa_store(&p2pdma->map_types, map_types_idx(client),
			 xa_mk_value(map_type), GFP_ATOMIC);
	rcu_read_unlock();
	return map_type;
}

/**
 * pci_p2pdma_distance_many - Determine the cumulative distance between
 *	a p2pdma provider and the clients in use.
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible, otherwise returns a
 * non-negative number where a lower number is the preferable choice. (If
 * there's one client that's the same as the provider it will return 0,
 * which is the best choice.)
 *
 * "compatible" means the provider and the clients are either all behind
 * the same PCI root port or the host bridges connected to each of the devices
 * are listed in the 'pci_p2pdma_whitelist'.
 */
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	enum pci_p2pdma_map_type map;
	bool not_supported = false;
	struct pci_dev *pci_client;
	int total_dist = 0;
	int i, distance;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		map = calc_map_type_and_dist(provider, pci_client, &distance,
					     verbose);

		pci_dev_put(pci_client);

		if (map == PCI_P2PDMA_MAP_NOT_SUPPORTED)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		total_dist += distance;
	}

	if (not_supported)
		return -1;

	return total_dist;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
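
/*
 * Illustrative sketch (not part of this file): a caller with two client
 * devices can check whether a candidate provider is usable and how "far"
 * away it is (variable names hypothetical):
 *
 *	struct device *clients[] = { &nvme_dev->dev, &rdma_dev->dev, NULL };
 *	int dist = pci_p2pdma_distance_many(provider_pdev, clients, 2, true);
 *
 *	if (dist < 0)
 *		return -EINVAL;	// at least one client is not compatible
 */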

/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */
static bool pci_has_p2pmem(struct pci_dev *pdev)
{
	struct pci_p2pdma *p2pdma;
	bool res;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	res = p2pdma && p2pdma->p2pmem_published;
	rcu_read_unlock();

	return res;
}

/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
 *	the specified list of clients and shortest distance
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other providers that are unrelated). If multiple providers are an equal
 * distance away, one will be chosen at random.
 *
 * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
 * to return the reference) or NULL if no compatible device is found. The
 * found provider will also be assigned to the client list.
 */
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	for_each_pci_dev(pdev) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[get_random_u32_below(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
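
/*
 * Illustrative sketch (not part of this file): the typical consumer flow
 * pairs pci_p2pmem_find_many() with the allocator below (variable names
 * hypothetical):
 *
 *	struct pci_dev *p2p_dev = pci_p2pmem_find_many(clients, 2);
 *	void *buf;
 *
 *	if (!p2p_dev)
 *		return -ENODEV;
 *
 *	buf = pci_alloc_p2pmem(p2p_dev, SZ_4K);
 *	if (buf) {
 *		... // DMA to/from buf using its bus address
 *		pci_free_p2pmem(p2p_dev, buf, SZ_4K);
 *	}
 *	pci_dev_put(p2p_dev);
 */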

/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 */
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret = NULL;
	struct percpu_ref *ref;
	struct pci_p2pdma *p2pdma;

	/*
	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
	 * ensure pdev->p2pdma is non-NULL for the duration of the
	 * read-lock.
	 */
	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (unlikely(!p2pdma))
		goto out;

	ret = (void *)gen_pool_alloc_owner(p2pdma->pool, size, (void **)&ref);
	if (!ret)
		goto out;

	if (unlikely(!percpu_ref_tryget_live_rcu(ref))) {
		gen_pool_free(p2pdma->pool, (unsigned long)ret, size);
		ret = NULL;
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);

/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	struct percpu_ref *ref;
	struct pci_p2pdma *p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);

	gen_pool_free_owner(p2pdma->pool, (uintptr_t)addr, size,
			    (void **)&ref);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);

/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	struct pci_p2pdma *p2pdma;

	if (!addr)
		return 0;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (!p2pdma)
		return 0;

	/*
	 * Note: when we added the memory to the pool we used the PCI
	 * bus address as the physical address. So gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */
	return gen_pool_virt_to_phys(p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);

/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Return: %NULL on error or &struct scatterlist pointer and @nents on success
 */
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);

/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
 * @pdev: the device to allocate memory from
 * @sgl: the allocated scatterlist
 */
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);

/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-to-peer DMA operations. Non-published memory is reserved for
 * exclusive use of the device driver that registers the peer-to-peer
 * memory.
 */
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	struct pci_p2pdma *p2pdma;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		p2pdma->p2pmem_published = publish;
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
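
/*
 * Illustrative sketch (not part of this file): the scatterlist helpers
 * above wrap pci_alloc_p2pmem()/pci_free_p2pmem() for callers that work
 * in terms of SG lists (variable names hypothetical):
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = pci_p2pmem_alloc_sgl(p2p_dev, &nents, SZ_64K);
 *	if (!sgl)
 *		return -ENOMEM;
 *	... // hand sgl/nents to the DMA engine
 *	pci_p2pmem_free_sgl(p2p_dev, sgl);
 */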

/**
 * pci_p2pdma_map_type - Determine the mapping type for P2PDMA transfers
 * @provider: P2PDMA provider structure
 * @dev: Target device for the transfer
 *
 * Determines how peer-to-peer DMA transfers should be mapped between
 * the provider and the target device. The mapping type indicates whether
 * the transfer can be done directly through PCI switches or must go
 * through the host bridge.
 */
enum pci_p2pdma_map_type pci_p2pdma_map_type(struct p2pdma_provider *provider,
					     struct device *dev)
{
	enum pci_p2pdma_map_type type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	struct pci_dev *pdev = to_pci_dev(provider->owner);
	struct pci_dev *client;
	struct pci_p2pdma *p2pdma;
	int dist;

	if (!pdev->p2pdma)
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	if (!dev_is_pci(dev))
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	client = to_pci_dev(dev);

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);

	if (p2pdma)
		type = xa_to_value(xa_load(&p2pdma->map_types,
					   map_types_idx(client)));
	rcu_read_unlock();

	if (type == PCI_P2PDMA_MAP_UNKNOWN)
		return calc_map_type_and_dist(pdev, client, &dist, true);

	return type;
}

void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
			       struct device *dev, struct page *page)
{
	struct pci_p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(page_pgmap(page));

	if (state->mem == p2p_pgmap->mem)
		return;

	state->mem = p2p_pgmap->mem;
	state->map = pci_p2pdma_map_type(p2p_pgmap->mem, dev);
}
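
/*
 * Illustrative sketch (not part of this file): a mapping loop can use the
 * state cache above to avoid re-deriving the mapping type for every page
 * that comes from the same provider (names hypothetical):
 *
 *	struct pci_p2pdma_map_state state = {};
 *
 *	for_each_sg(sgl, sg, nents, i) {
 *		struct page *page = sg_page(sg);
 *
 *		if (is_pci_p2pdma_page(page))
 *			__pci_p2pdma_update_state(&state, dma_dev, page);
 *		... // map according to state.map
 *	}
 */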

/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *	to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *	(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format kstrtobool() accepts). A false
 * value disables p2pdma, a true value tells the caller to automatically
 * find a compatible device, and specifying a PCI device tells the caller
 * to use that specific provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success
 */
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
		/*
		 * If the user enters a PCI device that doesn't exist
		 * like "0000:01:00.1", we don't want kstrtobool to think
		 * it's a '0' when it's clearly not what the user wanted.
		 * So we require 0's and 1's to be exactly one character.
		 */
	} else if (!kstrtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);

/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *	whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
 *
 * Returns 0 on success
 */
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);
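
/*
 * Illustrative sketch (not part of this file): a subsystem exposing a
 * "p2pmem" configfs attribute can delegate both directions to the helpers
 * above (structure and field names hypothetical):
 *
 *	static ssize_t foo_p2pmem_store(struct config_item *item,
 *					const char *page, size_t count)
 *	{
 *		struct foo *foo = to_foo(item);
 *		int ret;
 *
 *		ret = pci_p2pdma_enable_store(page, &foo->p2p_dev,
 *					      &foo->use_p2pmem);
 *		return ret ? ret : count;
 *	}
 *
 *	static ssize_t foo_p2pmem_show(struct config_item *item, char *page)
 *	{
 *		struct foo *foo = to_foo(item);
 *
 *		return pci_p2pdma_enable_show(page, foo->p2p_dev,
 *					      foo->use_p2pmem);
 *	}
 */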