// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>

struct pci_p2pdma {
	struct gen_pool *pool;
	bool p2pmem_published;
};

struct p2pdma_pagemap {
	struct dev_pagemap pgmap;
	struct percpu_ref ref;
	struct completion ref_done;
};

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t size = 0;

	if (pdev->p2pdma->pool)
		size = gen_pool_size(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t avail = 0;

	if (pdev->p2pdma->pool)
		avail = gen_pool_avail(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			pdev->p2pdma->p2pmem_published);
}
static DEVICE_ATTR_RO(published);

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.name = "p2pmem",
};
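/*
 * Once registered via pci_p2pdma_setup(), the attribute group above
 * appears under the provider's sysfs directory. Illustrative paths
 * (the BDF below is hypothetical):
 *
 *	/sys/bus/pci/devices/0000:01:00.0/p2pmem/size
 *	/sys/bus/pci/devices/0000:01:00.0/p2pmem/available
 *	/sys/bus/pci/devices/0000:01:00.0/p2pmem/published
 */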
static struct p2pdma_pagemap *to_p2p_pgmap(struct percpu_ref *ref)
{
	return container_of(ref, struct p2pdma_pagemap, ref);
}

static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
{
	struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);

	complete(&p2p_pgmap->ref_done);
}

static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

static void pci_p2pdma_percpu_cleanup(struct percpu_ref *ref)
{
	struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);

	wait_for_completion(&p2p_pgmap->ref_done);
	percpu_ref_exit(&p2p_pgmap->ref);
}

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;
	struct pci_p2pdma *p2pdma = pdev->p2pdma;

	if (!p2pdma)
		return;

	/* Flush and disable pci_alloc_p2pmem() */
	pdev->p2pdma = NULL;
	synchronize_rcu();

	gen_pool_destroy(p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
}

static int pci_p2pdma_setup(struct pci_dev *pdev)
{
	int error = -ENOMEM;
	struct pci_p2pdma *p2p;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2p->pool)
		goto out;

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (error)
		goto out_pool_destroy;

	pdev->p2pdma = p2p;

	error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (error)
		goto out_pool_destroy;

	return 0;

out_pool_destroy:
	pdev->p2pdma = NULL;
	gen_pool_destroy(p2p->pool);
out:
	devm_kfree(&pdev->dev, p2p);
	return error;
}

/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 */
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct p2pdma_pagemap *p2p_pgmap;
	struct dev_pagemap *pgmap;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!pdev->p2pdma) {
		error = pci_p2pdma_setup(pdev);
		if (error)
			return error;
	}

	p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
	if (!p2p_pgmap)
		return -ENOMEM;

	init_completion(&p2p_pgmap->ref_done);
	error = percpu_ref_init(&p2p_pgmap->ref,
			pci_p2pdma_percpu_release, 0, GFP_KERNEL);
	if (error)
		goto pgmap_free;

	pgmap = &p2p_pgmap->pgmap;

	pgmap->res.start = pci_resource_start(pdev, bar) + offset;
	pgmap->res.end = pgmap->res.start + size - 1;
	pgmap->res.flags = pci_resource_flags(pdev, bar);
	pgmap->ref = &p2p_pgmap->ref;
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
	pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
		pci_resource_start(pdev, bar);
	pgmap->kill = pci_p2pdma_percpu_kill;
	pgmap->cleanup = pci_p2pdma_percpu_cleanup;

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
			pci_bus_address(pdev, bar) + offset,
			resource_size(&pgmap->res), dev_to_node(&pdev->dev),
			&p2p_pgmap->ref);
	if (error)
		goto pages_free;

	pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
		 &pgmap->res);

	return 0;

pages_free:
	devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
	devm_kfree(&pdev->dev, p2p_pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
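/*
 * Illustrative sketch, not part of this file: a driver that exposes a
 * dedicated memory BAR would typically register it from its probe
 * routine. The BAR index and size below are hypothetical.
 *
 *	error = pci_p2pdma_add_resource(pdev, 4, SZ_64K, 0);
 *	if (error)
 *		return error;
 */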
/*
 * Note this function returns the parent PCI device with a
 * reference taken. It is the caller's responsibility to drop
 * the reference.
 */
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}

/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

/*
 * If we can't find a common upstream bridge, take a look at the root
 * complex and compare it to a whitelist of known good hardware.
 */
static bool root_complex_whitelist(struct pci_dev *dev)
{
	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
	struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
	unsigned short vendor, device;

	if (!root)
		return false;

	vendor = root->vendor;
	device = root->device;
	pci_dev_put(root);

	/* AMD ZEN host bridges can do peer to peer */
	if (vendor == PCI_VENDOR_ID_AMD && device == 0x1450)
		return true;

	return false;
}

/*
 * Find the distance through the nearest common upstream bridge between
 * two PCI devices.
 *
 * If the two devices are the same device then 0 will be returned.
 *
 * If there are two virtual functions of the same device behind the same
 * bridge port then 2 will be returned (one step down to the PCIe switch,
 * then one step back to the same device).
 *
 * In the case where two devices are connected to the same PCIe switch, the
 * value 4 will be returned. This corresponds to the following PCI tree:
 *
 *     -+  Root Port
 *      \+ Switch Upstream Port
 *       +-+ Switch Downstream Port
 *       + \- Device A
 *       \-+ Switch Downstream Port
 *         \- Device B
 *
 * The distance is 4 because we traverse from Device A through the downstream
 * port of the switch, to the common upstream port, back up to the second
 * downstream port and then to Device B.
 *
 * Any two devices that don't have a common upstream bridge will return -1.
 * In this way devices on separate PCIe root ports will be rejected, which
 * is what we want for peer-to-peer, as each PCIe root port defines a
 * separate hierarchy domain and there's no way to determine whether the root
 * complex supports forwarding between them.
 *
 * In the case where two devices are connected to different PCIe switches,
 * this function will still return a positive distance as long as both
 * switches eventually have a common upstream bridge. Note this covers
 * the case of using multiple PCIe switches to achieve a desired level of
 * fan-out from a root port. The exact distance will be a function of the
 * number of switches between Device A and Device B.
 *
 * If a bridge which has any ACS redirection bits set is in the path
 * then this function will return -2. This is so we reject any
 * cases where the TLPs are forwarded up into the root complex.
 * In this case, a list of all infringing bridge addresses will be
 * populated in acs_list (assuming it's non-null) for printk purposes.
 */
static int upstream_bridge_distance(struct pci_dev *provider,
				    struct pci_dev *client,
				    struct seq_buf *acs_list)
{
	struct pci_dev *a = provider, *b = client, *bb;
	int dist_a = 0;
	int dist_b = 0;
	int acs_cnt = 0;

	/*
	 * Note, we don't need to take references to devices returned by
	 * pci_upstream_bridge() because we hold a reference to a child
	 * device which will already hold a reference to the upstream bridge.
	 */

	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	/*
	 * Allow the connection if both devices are on a whitelisted root
	 * complex, but add an arbitrary large value to the distance.
	 */
	if (root_complex_whitelist(provider) &&
	    root_complex_whitelist(client))
		return 0x1000 + dist_a + dist_b;

	return -1;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	if (acs_cnt)
		return -2;

	return dist_a + dist_b;
}
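/*
 * Worked trace (illustrative) of the walk above for the switch topology
 * shown in the comment: with Device A as @provider and Device B as
 * @client, the outer loop checks Device A (dist_a == 0) and its
 * downstream port (dist_a == 1) against Device B's upstream path without
 * a match, then reaches the switch upstream port with dist_a == 2. The
 * inner walk reaches that same port from Device B with dist_b == 2,
 * giving the documented distance of 4.
 */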
static int upstream_bridge_distance_warn(struct pci_dev *provider,
					 struct pci_dev *client)
{
	struct seq_buf acs_list;
	int ret;

	seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!acs_list.buffer)
		return -ENOMEM;

	ret = upstream_bridge_distance(provider, client, &acs_list);
	if (ret == -2) {
		pci_warn(client, "cannot be used for peer-to-peer DMA as ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));
		/* Drop final semicolon */
		acs_list.buffer[acs_list.len-1] = 0;
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);

	} else if (ret < 0) {
		pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge\n",
			 pci_name(provider));
	}

	kfree(acs_list.buffer);

	return ret;
}

/**
 * pci_p2pdma_distance_many - Determine the cumulative distance between
 *	a p2pdma provider and the clients in use.
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible with the provider,
 * otherwise returns a positive number where a lower number is the
 * preferable choice. (If there's one client that's the same as the
 * provider it will return 0, which is the best choice).
 *
 * For now, "compatible" means the provider and the clients are all behind
 * the same PCI root port. This cuts out cases that may work but is safest
 * for the user. Future work can expand this to whitelist root complexes
 * that can safely forward between their ports.
 */
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	bool not_supported = false;
	struct pci_dev *pci_client;
	int distance = 0;
	int i, ret;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		if (verbose)
			ret = upstream_bridge_distance_warn(provider,
							    pci_client);
		else
			ret = upstream_bridge_distance(provider, pci_client,
						       NULL);

		pci_dev_put(pci_client);

		if (ret < 0)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		distance += ret;
	}

	if (not_supported)
		return -1;

	return distance;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
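/*
 * Illustrative sketch, not part of this file: an orchestrating driver
 * holding a provider and a NULL-terminated client array might vet the
 * provider like this (names are hypothetical):
 *
 *	ret = pci_p2pdma_distance_many(provider, clients, num_clients, true);
 *	if (ret < 0)
 *		return -EINVAL;	 (warnings already printed, verbose == true)
 */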
/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */
bool pci_has_p2pmem(struct pci_dev *pdev)
{
	return pdev->p2pdma && pdev->p2pdma->p2pmem_published;
}
EXPORT_SYMBOL_GPL(pci_has_p2pmem);

/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible
 *	with the specified list of clients and shortest distance (as
 *	determined by pci_p2pdma_distance_many())
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other providers that are unrelated). If multiple providers are an equal
 * distance away, one will be chosen at random.
 *
 * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
 * to return the reference) or NULL if no compatible device is found. The
 * found provider will also be assigned to the client list.
 */
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
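/*
 * Illustrative sketch, not part of this file: pairing the lookup with the
 * required reference drop once the provider is no longer needed (the
 * client array name is hypothetical):
 *
 *	p2p_dev = pci_p2pmem_find_many(clients, num_clients);
 *	if (!p2p_dev)
 *		return -ENODEV;
 *
 *	... use the provider ...
 *
 *	pci_dev_put(p2p_dev);
 */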
/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 */
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret = NULL;
	struct percpu_ref *ref;

	/*
	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
	 * ensure pdev->p2pdma is non-NULL for the duration of the
	 * read-lock.
	 */
	rcu_read_lock();
	if (unlikely(!pdev->p2pdma))
		goto out;

	ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
					   (void **) &ref);
	if (!ret)
		goto out;

	if (unlikely(!percpu_ref_tryget_live(ref))) {
		gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
		ret = NULL;
		goto out;
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);

/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	struct percpu_ref *ref;

	gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
			    (void **) &ref);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);

/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	if (!addr)
		return 0;
	if (!pdev->p2pdma)
		return 0;

	/*
	 * Note: when we added the memory to the pool we used the PCI
	 * bus address as the physical address. So gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */
	return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
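/*
 * Illustrative sketch, not part of this file: a typical allocation
 * lifecycle, handing the bus address to a peer device (the buffer size
 * is hypothetical):
 *
 *	void *buf = pci_alloc_p2pmem(p2p_dev, SZ_4K);
 *	pci_bus_addr_t bus_addr;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	bus_addr = pci_p2pmem_virt_to_bus(p2p_dev, buf);
 *
 *	... program the peer with bus_addr and wait for completion ...
 *
 *	pci_free_p2pmem(p2p_dev, buf, SZ_4K);
 */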
/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Return: %NULL on error or &struct scatterlist pointer and @nents on success
 */
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);

/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
 * @pdev: the device the memory was allocated from
 * @sgl: the allocated scatterlist
 */
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);

/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-to-peer DMA operations. Non-published memory is reserved for
 * exclusive use of the device driver that registers the peer-to-peer
 * memory.
 */
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	if (pdev->p2pdma)
		pdev->p2pdma->p2pmem_published = publish;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);

/**
 * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA
 * @dev: device doing the DMA request
 * @sg: scatter list to map
 * @nents: elements in the scatterlist
 * @dir: DMA direction
 *
 * Scatterlists mapped with this function should not be unmapped in any way.
 *
 * Returns the number of SG entries mapped or 0 on error.
 */
int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir)
{
	struct dev_pagemap *pgmap;
	struct scatterlist *s;
	phys_addr_t paddr;
	int i;

	/*
	 * p2pdma mappings are not compatible with devices that use
	 * dma_virt_ops. If the upper layers do the right thing
	 * this should never happen because it will be prevented
	 * by the check in pci_p2pdma_add_client()
	 */
	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
			 dev->dma_ops == &dma_virt_ops))
		return 0;

	for_each_sg(sg, s, nents, i) {
		pgmap = sg_page(s)->pgmap;
		paddr = sg_phys(s);

		s->dma_address = paddr - pgmap->pci_p2pdma_bus_offset;
		sg_dma_len(s) = s->length;
	}

	return nents;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg);
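/*
 * Illustrative sketch, not part of this file: combining the SGL helpers
 * with the mapping above (dma_dev is the hypothetical device performing
 * the DMA):
 *
 *	sgl = pci_p2pmem_alloc_sgl(p2p_dev, &nents, length);
 *	if (!sgl)
 *		return -ENOMEM;
 *	if (!pci_p2pdma_map_sg(dma_dev, sgl, nents, DMA_BIDIRECTIONAL)) {
 *		pci_p2pmem_free_sgl(p2p_dev, sgl);
 *		return -EIO;
 *	}
 *
 *	... issue the transfer; do not call dma_unmap_sg() on sgl ...
 */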
/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *	to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *	(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format strtobool() accepts). A false
 * value disables p2pdma, a true value tells the caller to
 * automatically find a compatible device, and specifying a PCI device
 * tells the caller to use that specific provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success
 */
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
		/*
		 * If the user enters a PCI device that doesn't exist
		 * like "0000:01:00.1", we don't want strtobool to think
		 * it's a '0' when it's clearly not what the user wanted.
		 * So we require 0's and 1's to be exactly one character.
		 */
	} else if (!strtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);

/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *	whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
 *
 * Returns 0 on success
 */
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);
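/*
 * Illustrative sketch, not part of this file: a configfs attribute pair
 * built on the two helpers above. The container type, accessor, and
 * field names are hypothetical:
 *
 *	static ssize_t my_p2pmem_store(struct config_item *item,
 *				       const char *page, size_t count)
 *	{
 *		struct my_port *port = to_my_port(item);
 *		int ret;
 *
 *		ret = pci_p2pdma_enable_store(page, &port->p2p_dev,
 *					      &port->use_p2pmem);
 *		return ret ? ret : count;
 *	}
 *
 *	static ssize_t my_p2pmem_show(struct config_item *item, char *page)
 *	{
 *		struct my_port *port = to_my_port(item);
 *
 *		return pci_p2pdma_enable_show(page, port->p2p_dev,
 *					      port->use_p2pmem);
 *	}
 */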