// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Endpoint *Controller* (EPC) library
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>

static const struct class pci_epc_class = {
	.name = "pci_epc",
};

static void devm_pci_epc_release(struct device *dev, void *res)
{
	struct pci_epc *epc = *(struct pci_epc **)res;

	pci_epc_destroy(epc);
}

static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
{
	struct pci_epc **epc = res;

	return *epc == match_data;
}

/**
 * pci_epc_put() - release the PCI endpoint controller
 * @epc: epc returned by pci_epc_get()
 *
 * release the refcount the caller obtained by invoking pci_epc_get()
 */
void pci_epc_put(struct pci_epc *epc)
{
	if (IS_ERR_OR_NULL(epc))
		return;

	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);

/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Invoke to get struct pci_epc * corresponding to the device name of the
 * endpoint controller
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;

	dev = class_find_device_by_name(&pci_epc_class, epc_name);
	if (!dev)
		goto err;

	epc = to_pci_epc(dev);
	if (try_module_get(epc->ops->owner))
		return epc;

err:
	put_device(dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
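
/*
 * Illustrative usage sketch (not part of this library): an endpoint function
 * driver typically looks up its controller by name and pairs the lookup with
 * pci_epc_put() when it is done. The controller name below is hypothetical.
 *
 *	struct pci_epc *epc;
 *
 *	epc = pci_epc_get("my-pcie-ep");
 *	if (IS_ERR(epc))
 *		return PTR_ERR(epc);
 *
 *	... use the EPC ...
 *
 *	pci_epc_put(epc);
 */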

/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved BAR bitmap
 *
 * Invoke to get the first unreserved BAR that can be used by the endpoint
 * function.
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);

/**
 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
 * @epc_features: pci_epc_features structure that holds the reserved BAR bitmap
 * @bar: the starting BAR number from where unreserved BAR should be searched
 *
 * Invoke to get the next unreserved BAR starting from @bar that can be used
 * for an endpoint function.
 */
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
					 *epc_features, enum pci_barno bar)
{
	int i;

	if (!epc_features)
		return BAR_0;

	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
	if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
		bar++;

	for (i = bar; i < PCI_STD_NUM_BARS; i++) {
		/* If the BAR is not reserved, return it. */
		if (epc_features->bar[i].type != BAR_RESERVED)
			return i;
	}

	return NO_BAR;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
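
/*
 * Illustrative usage sketch (not part of this library): walking the
 * unreserved BARs reported by the controller, assuming @epc_features was
 * obtained from pci_epc_get_features().
 *
 *	enum pci_barno bar;
 *
 *	bar = pci_epc_get_first_free_bar(epc_features);
 *	while (bar != NO_BAR) {
 *		... configure this BAR ...
 *		bar = pci_epc_get_next_free_bar(epc_features, bar + 1);
 *	}
 */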

static bool pci_epc_function_is_valid(struct pci_epc *epc,
				      u8 func_no, u8 vfunc_no)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return false;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return false;

	return true;
}

/**
 * pci_epc_get_features() - get the features supported by EPC
 * @epc: the features supported by *this* EPC device will be returned
 * @func_no: the features supported by the EPC device specific to the
 *	     endpoint function with func_no will be returned
 * @vfunc_no: the features supported by the EPC device specific to the
 *	      virtual endpoint function with vfunc_no will be returned
 *
 * Invoke to get the features provided by the EPC which may be
 * specific to an endpoint function. Returns pci_epc_features on success
 * and NULL for any failures.
 */
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
						    u8 func_no, u8 vfunc_no)
{
	const struct pci_epc_features *epc_features;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return NULL;

	if (!epc->ops->get_features)
		return NULL;

	mutex_lock(&epc->lock);
	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	return epc_features;
}
EXPORT_SYMBOL_GPL(pci_epc_get_features);
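
/*
 * Illustrative usage sketch (not part of this library): an endpoint function
 * driver usually queries the controller features once at bind time and bases
 * its BAR and interrupt setup on the result.
 *
 *	const struct pci_epc_features *epc_features;
 *
 *	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
 *	if (!epc_features)
 *		return -EOPNOTSUPP;
 *
 *	if (epc_features->msix_capable)
 *		... program the MSI-X capability ...
 */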

/**
 * pci_epc_stop() - stop the PCI link
 * @epc: the link of the EPC device that has to be stopped
 *
 * Invoke to stop the PCI link
 */
void pci_epc_stop(struct pci_epc *epc)
{
	if (IS_ERR(epc) || !epc->ops->stop)
		return;

	mutex_lock(&epc->lock);
	epc->ops->stop(epc);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);

/**
 * pci_epc_start() - start the PCI link
 * @epc: the link of *this* EPC device has to be started
 *
 * Invoke to start the PCI link
 */
int pci_epc_start(struct pci_epc *epc)
{
	int ret;

	if (IS_ERR(epc))
		return -EINVAL;

	if (!epc->ops->start)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->start(epc);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_start);

/**
 * pci_epc_raise_irq() - interrupt the host system
 * @epc: the EPC device which has to interrupt the host
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @type: specify the type of interrupt; INTX, MSI or MSI-X
 * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
 *
 * Invoke to raise an INTX, MSI or MSI-X interrupt
 */
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		      unsigned int type, u16 interrupt_num)
{
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (!epc->ops->raise_irq)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
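
/*
 * Illustrative usage sketch (not part of this library): raising MSI vector 1
 * towards the host from an endpoint function driver, falling back to INTX if
 * the host did not enable MSI. The PCI_IRQ_* type values are assumed to match
 * the interrupt types named in the kernel-doc above.
 *
 *	if (pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no) > 0)
 *		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 *				  PCI_IRQ_MSI, 1);
 *	else
 *		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 *				  PCI_IRQ_INTX, 0);
 */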

/**
 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
 *			   MSI data
 * @epc: the EPC device which has the MSI capability
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: the physical address of the outbound region
 * @interrupt_num: the MSI interrupt number with range (1-N)
 * @entry_size: Size of Outbound address region for each interrupt
 * @msi_data: the data that should be written in order to raise MSI interrupt
 *	      with interrupt number as 'interrupt num'
 * @msi_addr_offset: Offset of MSI address from the aligned outbound address
 *		     to which the MSI address is mapped
 *
 * Invoke to map physical address to MSI address and return MSI data. The
 * physical address should be an address in the outbound region. This is
 * required to implement doorbell functionality of NTB wherein EPC on either
 * side of the interface (primary and secondary) can directly write to the
 * physical address (in outbound region) of the other interface to ring
 * doorbell.
 */
int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
			u32 *msi_data, u32 *msi_addr_offset)
{
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (!epc->ops->map_msi_irq)
		return -EINVAL;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
				    interrupt_num, entry_size, msi_data,
				    msi_addr_offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);

/**
 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
 * @epc: the EPC device to which MSI interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI interrupts allocated by the RC
 */
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return 0;

	if (!epc->ops->get_msi)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	interrupt = 1 << interrupt;

	return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);

/**
 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
 * @epc: the EPC device on which MSI has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI interrupts required by the EPF
 *
 * Invoke to set the required number of MSI interrupts.
 */
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
	int ret;
	u8 encode_int;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (interrupts < 1 || interrupts > 32)
		return -EINVAL;

	if (!epc->ops->set_msi)
		return 0;

	encode_int = order_base_2(interrupts);

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msi);
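
/*
 * Illustrative usage sketch (not part of this library): an endpoint function
 * driver advertises the number of MSI vectors it needs at configuration time
 * and later reads back how many the Root Complex actually enabled.
 *
 *	ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, 16);
 *	if (ret)
 *		return ret;
 *
 *	... after the host has configured the function ...
 *
 *	nr_vectors = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
 */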

/**
 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
 * @epc: the EPC device to which MSI-X interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI-X interrupts allocated by the RC
 */
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return 0;

	if (!epc->ops->get_msix)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	return interrupt + 1;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);

/**
 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
 * @epc: the EPC device on which MSI-X has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI-X interrupts required by the EPF
 * @bir: BAR where the MSI-X table resides
 * @offset: Offset pointing to the start of MSI-X table
 *
 * Invoke to set the required number of MSI-X interrupts.
 */
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     u16 interrupts, enum pci_barno bir, u32 offset)
{
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (interrupts < 1 || interrupts > 2048)
		return -EINVAL;

	if (!epc->ops->set_msix)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
				 offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msix);

/**
 * pci_epc_unmap_addr() - unmap CPU address from PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 *
 * Invoke to unmap the CPU address from PCI address.
 */
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr)
{
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return;

	if (!epc->ops->unmap_addr)
		return;

	mutex_lock(&epc->lock);
	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);

/**
 * pci_epc_map_addr() - map CPU address to PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 * @pci_addr: PCI address to which the physical address should be mapped
 * @size: the size of the allocation
 *
 * Invoke to map CPU address with PCI address.
 */
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (!epc->ops->map_addr)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
				 size);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_addr);
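
/*
 * Illustrative usage sketch (not part of this library): mapping an outbound
 * window to a host buffer and copying data into it. The memory allocation
 * helpers come from the EPC memory management code (pci-epc-mem.c); the host
 * buffer address and size are assumed to have been communicated by the host
 * beforehand.
 *
 *	void __iomem *virt;
 *	phys_addr_t phys;
 *
 *	virt = pci_epc_mem_alloc_addr(epc, &phys, size);
 *	if (!virt)
 *		return -ENOMEM;
 *
 *	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys,
 *			       host_buf_addr, size);
 *	if (ret)
 *		goto free;
 *
 *	memcpy_toio(virt, src, size);
 *
 *	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys);
 * free:
 *	pci_epc_mem_free_addr(epc, phys, virt, size);
 */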

/**
 * pci_epc_mem_map() - allocate and map a PCI address to a CPU address
 * @epc: the EPC device on which the CPU address is to be allocated and mapped
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @pci_addr: PCI address to which the CPU address should be mapped
 * @pci_size: the number of bytes to map starting from @pci_addr
 * @map: where to return the mapping information
 *
 * Allocate a controller memory address region and map it to an RC PCI address
 * region, taking into account the controller physical address mapping
 * constraints using the controller operation align_addr(). If this operation is
 * not defined, we assume that there are no alignment constraints for the
 * mapping.
 *
 * The effective size of the PCI address range mapped from @pci_addr is
 * indicated by @map->pci_size. This size may be less than the requested
 * @pci_size. The local virtual CPU address for the mapping is indicated by
 * @map->virt_addr (@map->phys_addr indicates the physical address).
 * The size and CPU address of the controller memory allocated and mapped are
 * respectively indicated by @map->map_size and @map->virt_base (and
 * @map->phys_base for the physical address of @map->virt_base).
 *
 * Returns 0 on success and a negative error code in case of error.
 */
int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    u64 pci_addr, size_t pci_size, struct pci_epc_map *map)
{
	size_t map_size = pci_size;
	size_t map_offset = 0;
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	if (!pci_size || !map)
		return -EINVAL;

	/*
	 * Align the PCI address to map. If the controller defines the
	 * .align_addr() operation, use it to determine the PCI address to map
	 * and the size of the mapping. Otherwise, assume that the controller
	 * has no alignment constraint.
	 */
	memset(map, 0, sizeof(*map));
	map->pci_addr = pci_addr;
	if (epc->ops->align_addr)
		map->map_pci_addr =
			epc->ops->align_addr(epc, pci_addr,
					     &map_size, &map_offset);
	else
		map->map_pci_addr = pci_addr;
	map->map_size = map_size;
	if (map->map_pci_addr + map->map_size < pci_addr + pci_size)
		map->pci_size = map->map_pci_addr + map->map_size - pci_addr;
	else
		map->pci_size = pci_size;

	map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base,
						map->map_size);
	if (!map->virt_base)
		return -ENOMEM;

	map->phys_addr = map->phys_base + map_offset;
	map->virt_addr = map->virt_base + map_offset;

	ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base,
			       map->map_pci_addr, map->map_size);
	if (ret) {
		pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
				      map->map_size);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_map);

/**
 * pci_epc_mem_unmap() - unmap and free a CPU address region
 * @epc: the EPC device on which the CPU address is allocated and mapped
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @map: the mapping information
 *
 * Unmap and free a CPU address region that was allocated and mapped with
 * pci_epc_mem_map().
 */
void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epc_map *map)
{
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return;

	if (!map || !map->virt_base)
		return;

	pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base);
	pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
			      map->map_size);
}
EXPORT_SYMBOL_GPL(pci_epc_mem_unmap);
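
/*
 * Illustrative usage sketch (not part of this library): pci_epc_mem_map()
 * bundles the allocate/align/map steps shown earlier. A caller accesses the
 * host address through map.virt_addr and releases everything in one call.
 *
 *	struct pci_epc_map map;
 *
 *	ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
 *			      host_buf_addr, len, &map);
 *	if (ret)
 *		return ret;
 *
 *	memcpy_fromio(dst, map.virt_addr, map.pci_size);
 *
 *	pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
 */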

/**
 * pci_epc_clear_bar() - reset the BAR
 * @epc: the EPC device for which the BAR has to be cleared
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to reset the BAR of the endpoint device.
 */
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epf_bar *epf_bar)
{
	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return;

	if (epf_bar->barno == BAR_5 &&
	    epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
		return;

	if (!epc->ops->clear_bar)
		return;

	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);

/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	const struct pci_epc_features *epc_features;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	int ret;

	epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
	if (!epc_features)
		return -EINVAL;

	if (epc_features->bar[bar].type == BAR_FIXED &&
	    (epc_features->bar[bar].fixed_size != epf_bar->size))
		return -EINVAL;

	if (!is_power_of_2(epf_bar->size))
		return -EINVAL;

	if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
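
/*
 * Illustrative usage sketch (not part of this library): an endpoint function
 * driver typically backs a BAR with pci_epf_alloc_space() and then registers
 * it with the controller. The allocation helper is assumed to fill in the
 * struct pci_epf_bar that is passed to pci_epc_set_bar(); exact helper
 * arguments vary across kernel versions.
 *
 *	void *base;
 *
 *	base = pci_epf_alloc_space(epf, bar_size, BAR_0, epc_features,
 *				   PRIMARY_INTERFACE);
 *	if (!base)
 *		return -ENOMEM;
 *
 *	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
 *			      &epf->bar[BAR_0]);
 *	if (ret)
 *		pci_epf_free_space(epf, base, BAR_0, PRIMARY_INTERFACE);
 */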

/**
 * pci_epc_write_header() - write standard configuration header
 * @epc: the EPC device to which the configuration header should be written
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @header: standard configuration header fields
 *
 * Invoke to write the configuration header to the endpoint controller. Every
 * endpoint controller will have a dedicated location to which the standard
 * configuration header would be written. The callback function should write
 * the header fields to this dedicated location.
 */
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			 struct pci_epf_header *header)
{
	int ret;

	if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
		return -EINVAL;

	/* Only Virtual Function #1 has deviceID */
	if (vfunc_no > 1)
		return -EINVAL;

	if (!epc->ops->write_header)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_write_header);
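
/*
 * Illustrative usage sketch (not part of this library): writing the standard
 * configuration header from an endpoint function driver, typically once the
 * controller reports that it has been initialized. The header values below
 * are placeholders.
 *
 *	static struct pci_epf_header header = {
 *		.vendorid	= PCI_ANY_ID,
 *		.deviceid	= PCI_ANY_ID,
 *		.baseclass_code	= PCI_CLASS_OTHERS,
 *		.interrupt_pin	= PCI_INTERRUPT_INTA,
 *	};
 *
 *	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, &header);
 *	if (ret)
 *		return ret;
 */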

/**
 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
 * @epc: the EPC device to which the endpoint function should be added
 * @epf: the endpoint function to be added
 * @type: Identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * A PCI endpoint device can have one or more functions. In the case of PCIe,
 * the specification allows up to 8 PCIe endpoint functions. Invoke
 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
 */
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
		    enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no;
	int ret = 0;

	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
		return -EINVAL;

	if (type == PRIMARY_INTERFACE && epf->epc)
		return -EBUSY;

	if (type == SECONDARY_INTERFACE && epf->sec_epc)
		return -EBUSY;

	mutex_lock(&epc->list_lock);
	func_no = find_first_zero_bit(&epc->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		ret = -EINVAL;
		goto ret;
	}

	if (func_no > epc->max_functions - 1) {
		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
		ret = -EINVAL;
		goto ret;
	}

	set_bit(func_no, &epc->function_num_map);
	if (type == PRIMARY_INTERFACE) {
		epf->func_no = func_no;
		epf->epc = epc;
		list = &epf->list;
	} else {
		epf->sec_epc_func_no = func_no;
		epf->sec_epc = epc;
		list = &epf->sec_epc_list;
	}

	list_add_tail(list, &epc->pci_epf);
ret:
	mutex_unlock(&epc->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);

/**
 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
 * @epc: the EPC device from which the endpoint function should be removed
 * @epf: the endpoint function to be removed
 * @type: identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * Invoke to remove PCI endpoint function from the endpoint controller.
 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
			enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no = 0;

	if (IS_ERR_OR_NULL(epc) || !epf)
		return;

	mutex_lock(&epc->list_lock);
	if (type == PRIMARY_INTERFACE) {
		func_no = epf->func_no;
		list = &epf->list;
		epf->epc = NULL;
	} else {
		func_no = epf->sec_epc_func_no;
		list = &epf->sec_epc_list;
		epf->sec_epc = NULL;
	}
	clear_bit(func_no, &epc->function_num_map);
	list_del(list);
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);

/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 *		      connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);

/**
 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
 *			connection with the Root Complex.
 * @epc: the EPC device which has dropped the link with the host
 *
 * Invoke to notify the EPF device that the EPC device has dropped the
 * connection with the Root Complex.
 */
void pci_epc_linkdown(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_down)
			epf->event_ops->link_down(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkdown);

/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device initialization
 *			   is completed.
 * @epc: the EPC device whose initialization is completed
 *
 * Invoke to notify the EPF device that the EPC device's initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_init)
			epf->event_ops->epc_init(epf);
		mutex_unlock(&epf->lock);
	}
	epc->init_complete = true;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);

/**
 * pci_epc_notify_pending_init() - Notify the pending EPC device initialization
 *				   complete to the EPF device
 * @epc: the EPC device whose initialization is pending to be notified
 * @epf: the EPF device to be notified
 *
 * Invoke to notify the pending EPC device initialization complete to the EPF
 * device. This is used to deliver the notification if the EPC initialization
 * got completed before the EPF driver bind.
 */
void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
{
	if (epc->init_complete) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_init)
			epf->event_ops->epc_init(epf);
		mutex_unlock(&epf->lock);
	}
}
EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);

/**
 * pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
 * @epc: the EPC device whose deinitialization is completed
 *
 * Invoke to notify the EPF device that the EPC deinitialization is completed.
 */
void pci_epc_deinit_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_deinit)
			epf->event_ops->epc_deinit(epf);
		mutex_unlock(&epf->lock);
	}
	epc->init_complete = false;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);

/**
 * pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
 *					device has received the Bus Master
 *					Enable event from the Root complex
 * @epc: the EPC device that received the Bus Master Enable event
 *
 * Notify the EPF device that the EPC device has generated the Bus Master Enable
 * event due to the host setting the Bus Master Enable bit in the Command register.
 */
void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->bus_master_enable)
			epf->event_ops->bus_master_enable(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);

/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	pci_ep_cfs_remove_epc_group(epc->group);
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr);
#endif
	device_unregister(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);

/**
 * devm_pci_epc_destroy() - destroy the EPC device
 * @dev: device that wants to destroy the EPC
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the devres associated with this
 * pci_epc and destroy the EPC device.
 */
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
{
	int r;

	r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match,
			   epc);
	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
}
EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);

static void pci_epc_release(struct device *dev)
{
	kfree(to_pci_epc(dev));
}

/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	device_initialize(&epc->dev);
	epc->dev.class = &pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	epc->ops = ops;

#ifdef CONFIG_PCI_DOMAINS_GENERIC
	epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
#else
	/*
	 * TODO: If the architecture doesn't support generic PCI
	 * domains, then a custom implementation has to be used.
	 */
	WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
#endif

	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);

/**
 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 * While at that, it also associates the device with the pci_epc using devres.
 * On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		      struct module *owner)
{
	struct pci_epc **ptr, *epc;

	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	epc = __pci_epc_create(dev, ops, owner);
	if (!IS_ERR(epc)) {
		*ptr = epc;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return epc;
}
EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
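
/*
 * Illustrative usage sketch (not part of this library): a controller driver
 * normally creates its EPC from probe() through the devm_pci_epc_create()
 * wrapper so the device is destroyed automatically on driver detach.
 * "my_epc_ops" is a hypothetical struct pci_epc_ops provided by that driver.
 *
 *	struct pci_epc *epc;
 *
 *	epc = devm_pci_epc_create(&pdev->dev, &my_epc_ops);
 *	if (IS_ERR(epc))
 *		return PTR_ERR(epc);
 *
 *	epc->max_functions = 1;
 */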

static int __init pci_epc_init(void)
{
	return class_register(&pci_epc_class);
}
module_init(pci_epc_init);

static void __exit pci_epc_exit(void)
{
	class_unregister(&pci_epc_class);
}
module_exit(pci_epc_exit);

MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");