// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
#include <linux/iommu.h>

struct pci_p2pdma {
	struct gen_pool *pool;
	bool p2pmem_published;
};

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t size = 0;

	if (pdev->p2pdma->pool)
		size = gen_pool_size(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t avail = 0;

	if (pdev->p2pdma->pool)
		avail = gen_pool_avail(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			pdev->p2pdma->p2pmem_published);
}
static DEVICE_ATTR_RO(published);

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.name = "p2pmem",
};

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;
	struct pci_p2pdma *p2pdma = pdev->p2pdma;

	if (!p2pdma)
		return;

	/* Flush and disable pci_alloc_p2pmem() */
	pdev->p2pdma = NULL;
	synchronize_rcu();

	gen_pool_destroy(p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
}

static int pci_p2pdma_setup(struct pci_dev *pdev)
{
	int error = -ENOMEM;
	struct pci_p2pdma *p2p;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2p->pool)
		goto out;

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (error)
		goto out_pool_destroy;

	pdev->p2pdma = p2p;

	error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (error)
		goto out_pool_destroy;

	return 0;

out_pool_destroy:
	pdev->p2pdma = NULL;
	gen_pool_destroy(p2p->pool);
out:
	devm_kfree(&pdev->dev, p2p);
	return error;
}

/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 */
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct dev_pagemap *pgmap;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!pdev->p2pdma) {
		error = pci_p2pdma_setup(pdev);
		if (error)
			return error;
	}

	pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->res.start = pci_resource_start(pdev, bar) + offset;
	pgmap->res.end = pgmap->res.start + size - 1;
	pgmap->res.flags = pci_resource_flags(pdev, bar);
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
	pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
		pci_resource_start(pdev, bar);

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
			pci_bus_address(pdev, bar) + offset,
			resource_size(&pgmap->res), dev_to_node(&pdev->dev),
			pgmap->ref);
	if (error)
		goto pages_free;

	pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
		 &pgmap->res);

	return 0;

pages_free:
	devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
	devm_kfree(&pdev->dev, pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
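
/*
 * Example: how a provider driver might register one of its BARs as p2p
 * memory and publish it for other drivers. An illustrative sketch only;
 * "foo_pdev" and the choice of BAR 4 are hypothetical.
 *
 *	static int foo_register_p2pmem(struct pci_dev *foo_pdev)
 *	{
 *		int rc;
 *
 *		// expose all of BAR 4 (size = 0, offset = 0)
 *		rc = pci_p2pdma_add_resource(foo_pdev, 4, 0, 0);
 *		if (rc)
 *			return rc;
 *
 *		// allow other drivers to allocate from this memory
 *		pci_p2pmem_publish(foo_pdev, true);
 *		return 0;
 *	}
 */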

/*
 * Note this function returns the parent PCI device with a
 * reference taken. It is the caller's responsibility to drop
 * the reference.
 */
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}

/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

/*
 * If we can't find a common upstream bridge, take a look at the root
 * complex and compare it to a whitelist of known good hardware.
 */
static bool root_complex_whitelist(struct pci_dev *dev)
{
	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
	struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
	unsigned short vendor, device;

	if (iommu_present(dev->dev.bus))
		return false;

	if (!root)
		return false;

	vendor = root->vendor;
	device = root->device;
	pci_dev_put(root);

	/* AMD ZEN host bridges can do peer to peer */
	if (vendor == PCI_VENDOR_ID_AMD && device == 0x1450)
		return true;

	return false;
}

/*
 * Find the distance through the nearest common upstream bridge between
 * two PCI devices.
 *
 * If the two devices are the same device then 0 will be returned.
 *
 * If there are two virtual functions of the same device behind the same
 * bridge port then 2 will be returned (one step down to the PCIe switch,
 * then one step back to the same device).
 *
 * In the case where two devices are connected to the same PCIe switch, the
 * value 4 will be returned. This corresponds to the following PCI tree:
 *
 *     -+  Root Port
 *      \+ Switch Upstream Port
 *       +-+ Switch Downstream Port
 *       + \- Device A
 *       \-+ Switch Downstream Port
 *         \- Device B
 *
 * The distance is 4 because we traverse from Device A through the downstream
 * port of the switch, to the common upstream port, back up to the second
 * downstream port and then to Device B.
 *
 * Any two devices that don't have a common upstream bridge will return -1.
 * In this way devices on separate PCIe root ports will be rejected, which
 * is what we want for peer-to-peer, since each PCIe root port defines a
 * separate hierarchy domain and there's no way to determine whether the root
 * complex supports forwarding between them.
 *
 * In the case where two devices are connected to different PCIe switches,
 * this function will still return a positive distance as long as both
 * switches eventually have a common upstream bridge. Note this covers
 * the case of using multiple PCIe switches to achieve a desired level of
 * fan-out from a root port. The exact distance will be a function of the
 * number of switches between Device A and Device B.
 *
 * If a bridge which has any ACS redirection bits set is in the path
 * then this function will return -2. This is so we reject any
 * cases where the TLPs are forwarded up into the root complex.
 * In this case, a list of all infringing bridge addresses will be
 * populated in acs_list (assuming it's non-null) for printk purposes.
 */
static int upstream_bridge_distance(struct pci_dev *provider,
				    struct pci_dev *client,
				    struct seq_buf *acs_list)
{
	struct pci_dev *a = provider, *b = client, *bb;
	int dist_a = 0;
	int dist_b = 0;
	int acs_cnt = 0;

	/*
	 * Note, we don't need to take references to devices returned by
	 * pci_upstream_bridge() since we hold a reference to a child
	 * device which will already hold a reference to the upstream bridge.
	 */

	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	/*
	 * Allow the connection if both devices are on a whitelisted root
	 * complex, but add an arbitrary large value to the distance.
	 */
	if (root_complex_whitelist(provider) &&
	    root_complex_whitelist(client))
		return 0x1000 + dist_a + dist_b;

	return -1;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	if (acs_cnt)
		return -2;

	return dist_a + dist_b;
}

static int upstream_bridge_distance_warn(struct pci_dev *provider,
					 struct pci_dev *client)
{
	struct seq_buf acs_list;
	int ret;

	seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!acs_list.buffer)
		return -ENOMEM;

	ret = upstream_bridge_distance(provider, client, &acs_list);
	if (ret == -2) {
		pci_warn(client, "cannot be used for peer-to-peer DMA as ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));
		/* Drop final semicolon */
		acs_list.buffer[acs_list.len-1] = 0;
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);

	} else if (ret < 0) {
		pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge\n",
			 pci_name(provider));
	}

	kfree(acs_list.buffer);

	return ret;
}

/**
 * pci_p2pdma_distance_many - Determine the cumulative distance between
 *	a p2pdma provider and the clients in use.
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible (i.e. not behind
 * the same root port as the provider), otherwise returns a positive
 * number where a lower number is the preferable choice. (If there's one
 * client that's the same as the provider it will return 0, which is the
 * best choice).
 *
 * For now, "compatible" means the provider and the clients are all behind
 * the same PCI root port. This cuts out cases that may work but is safest
 * for the user. Future work can expand this to whitelist root complexes
 * that can safely forward between their ports.
 */
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	bool not_supported = false;
	struct pci_dev *pci_client;
	int distance = 0;
	int i, ret;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
		if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
		    clients[i]->dma_ops == &dma_virt_ops) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
			return -1;
		}

		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		if (verbose)
			ret = upstream_bridge_distance_warn(provider,
							    pci_client);
		else
			ret = upstream_bridge_distance(provider, pci_client,
						       NULL);

		pci_dev_put(pci_client);

		if (ret < 0)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		distance += ret;
	}

	if (not_supported)
		return -1;

	return distance;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
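
/*
 * Example: checking whether a provider can safely do P2P DMA with a set
 * of client devices. An illustrative sketch only; "provider", "dev_a"
 * and "dev_b" are hypothetical.
 *
 *	struct device *clients[] = { dev_a, dev_b };
 *	int dist;
 *
 *	dist = pci_p2pdma_distance_many(provider, clients,
 *					ARRAY_SIZE(clients), true);
 *	if (dist < 0)
 *		return -EINVAL;	// some client can't reach the provider
 *	// a smaller dist means a closer, more preferable provider
 */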

/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */
bool pci_has_p2pmem(struct pci_dev *pdev)
{
	return pdev->p2pdma && pdev->p2pdma->p2pmem_published;
}
EXPORT_SYMBOL_GPL(pci_has_p2pmem);

/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible
 *	with the specified list of clients and shortest distance (as
 *	determined by pci_p2pdma_distance_many())
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other providers that are unrelated). If multiple providers are an equal
 * distance away, one will be chosen at random.
 *
 * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
 * to return the reference) or NULL if no compatible device is found. The
 * found provider will also be assigned to the client list.
 */
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
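
/*
 * Example: asking the kernel to pick the closest published provider for
 * a set of clients. An illustrative sketch only; "dev_a" and "dev_b"
 * are hypothetical client struct device pointers.
 *
 *	struct device *clients[] = { dev_a, dev_b };
 *	struct pci_dev *p2p_dev;
 *
 *	p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
 *	if (!p2p_dev)
 *		return -ENODEV;	// no compatible provider published p2pmem
 *
 *	// ... allocate with pci_alloc_p2pmem(p2p_dev, ...) ...
 *
 *	pci_dev_put(p2p_dev);	// drop the reference the find took
 */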

/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 */
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret = NULL;
	struct percpu_ref *ref;

	/*
	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
	 * ensure pdev->p2pdma is non-NULL for the duration of the
	 * read-lock.
	 */
	rcu_read_lock();
	if (unlikely(!pdev->p2pdma))
		goto out;

	ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
					   (void **) &ref);
	if (!ret)
		goto out;

	if (unlikely(!percpu_ref_tryget_live(ref))) {
		gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
		ret = NULL;
		goto out;
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);

/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	struct percpu_ref *ref;

	gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
			    (void **) &ref);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);

/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	if (!addr)
		return 0;
	if (!pdev->p2pdma)
		return 0;

	/*
	 * Note: when we added the memory to the pool we used the PCI
	 * bus address as the physical address. So gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */
	return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
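
/*
 * Example: allocating a buffer from a provider and translating it to a
 * bus address that a peer device can be programmed with. An illustrative
 * sketch only; "p2p_dev" and the SZ_4K size are hypothetical.
 *
 *	void *buf;
 *	pci_bus_addr_t bus_addr;
 *
 *	buf = pci_alloc_p2pmem(p2p_dev, SZ_4K);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	bus_addr = pci_p2pmem_virt_to_bus(p2p_dev, buf);
 *	// ... hand bus_addr to the peer's DMA engine ...
 *
 *	pci_free_p2pmem(p2p_dev, buf, SZ_4K);
 */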

/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Return: %NULL on error or &struct scatterlist pointer and @nents on success
 */
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);

/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
 * @pdev: the device the memory was allocated from
 * @sgl: the allocated scatterlist
 */
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);

/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-to-peer DMA operations. Non-published memory is reserved for
 * exclusive use of the device driver that registers the peer-to-peer
 * memory.
 */
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	if (pdev->p2pdma)
		pdev->p2pdma->p2pmem_published = publish;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);

/**
 * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA
 * @dev: device doing the DMA request
 * @sg: scatter list to map
 * @nents: elements in the scatterlist
 * @dir: DMA direction
 *
 * Scatterlists mapped with this function should not be unmapped in any way.
 *
 * Returns the number of SG entries mapped or 0 on error.
 */
int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir)
{
	struct dev_pagemap *pgmap;
	struct scatterlist *s;
	phys_addr_t paddr;
	int i;

	/*
	 * p2pdma mappings are not compatible with devices that use
	 * dma_virt_ops. If the upper layers do the right thing
	 * this should never happen because it will be prevented
	 * by the check in pci_p2pdma_distance_many().
	 */
	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
			 dev->dma_ops == &dma_virt_ops))
		return 0;

	for_each_sg(sg, s, nents, i) {
		pgmap = sg_page(s)->pgmap;
		paddr = sg_phys(s);

		s->dma_address = paddr - pgmap->pci_p2pdma_bus_offset;
		sg_dma_len(s) = s->length;
	}

	return nents;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg);
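
/*
 * Example: choosing between pci_p2pdma_map_sg() and dma_map_sg() in a
 * driver's mapping path. An illustrative sketch only; "dev", "sgl" and
 * "nents" come from a hypothetical driver whose scatterlist was filled
 * with pci_alloc_p2pmem() memory.
 *
 *	if (is_pci_p2pdma_page(sg_page(sgl)))
 *		nents = pci_p2pdma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	else
 *		nents = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -EIO;
 */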

/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *	to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *	(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format strtobool() accepts). A false
 * value disables p2pdma, a true value tells the caller to
 * automatically find a compatible device, and specifying a PCI device
 * tells the caller to use that specific provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success
 */
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
		/*
		 * If the user enters a PCI device that doesn't exist
		 * like "0000:01:00.1", we don't want strtobool to think
		 * it's a '0' when it's clearly not what the user wanted.
		 * So we require 0's and 1's to be exactly one character.
		 */
	} else if (!strtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);

/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *	whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
 *
 * Returns the number of characters printed
 */
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);
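
/*
 * Example: wiring these helpers into a configfs attribute, in the style
 * of the NVMe target's p2pmem attribute. An illustrative sketch only;
 * "struct foo", to_foo() and the surrounding configfs plumbing are
 * hypothetical.
 *
 *	static ssize_t foo_attr_p2pmem_store(struct config_item *item,
 *					     const char *page, size_t count)
 *	{
 *		struct foo *foo = to_foo(item);
 *		int error;
 *
 *		error = pci_p2pdma_enable_store(page, &foo->p2p_dev,
 *						&foo->use_p2pmem);
 *		return error ? error : count;
 *	}
 *
 *	static ssize_t foo_attr_p2pmem_show(struct config_item *item,
 *					    char *page)
 *	{
 *		struct foo *foo = to_foo(item);
 *
 *		return pci_p2pdma_enable_show(page, foo->p2p_dev,
 *					      foo->use_p2pmem);
 *	}
 */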