// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Endpoint *Controller* (EPC) library
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>

static struct class *pci_epc_class;

static void devm_pci_epc_release(struct device *dev, void *res)
{
	struct pci_epc *epc = *(struct pci_epc **)res;

	pci_epc_destroy(epc);
}

static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
{
	struct pci_epc **epc = res;

	return *epc == match_data;
}

/**
 * pci_epc_put() - release the PCI endpoint controller
 * @epc: epc returned by pci_epc_get()
 *
 * release the refcount the caller obtained by invoking pci_epc_get()
 */
void pci_epc_put(struct pci_epc *epc)
{
	if (IS_ERR_OR_NULL(epc))
		return;

	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);

/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Invoke to get struct pci_epc * corresponding to the device name of the
 * endpoint controller
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (strcmp(epc_name, dev_name(dev)))
			continue;

		epc = to_pci_epc(dev);
		if (!try_module_get(epc->ops->owner)) {
			ret = -EINVAL;
			goto err;
		}

		class_dev_iter_exit(&iter);
		get_device(&epc->dev);
		return epc;
	}

err:
	class_dev_iter_exit(&iter);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
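
/*
 * Example (illustrative sketch, not taken from an in-tree caller): a user of
 * this library would typically look up the controller by its device name and
 * balance the reference with pci_epc_put() when done. The controller name
 * below is a placeholder.
 *
 *	struct pci_epc *epc;
 *
 *	epc = pci_epc_get("pcie-ep");
 *	if (IS_ERR(epc))
 *		return PTR_ERR(epc);
 *
 *	// ... configure endpoint functions through the EPC ...
 *
 *	pci_epc_put(epc);
 */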

/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved BAR bitmap
 *
 * Invoke to get the first unreserved BAR that can be used by the endpoint
 * function.
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);

/**
 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
 * @epc_features: pci_epc_features structure that holds the reserved BAR bitmap
 * @bar: the starting BAR number from where unreserved BAR should be searched
 *
 * Invoke to get the next unreserved BAR starting from @bar that can be used
 * by the endpoint function.
 */
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
					 *epc_features, enum pci_barno bar)
{
	int i;

	if (!epc_features)
		return BAR_0;

	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
	if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
		bar++;

	for (i = bar; i < PCI_STD_NUM_BARS; i++) {
		/* If the BAR is not reserved, return it. */
		if (epc_features->bar[i].type != BAR_RESERVED)
			return i;
	}

	return NO_BAR;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);

/**
 * pci_epc_get_features() - get the features supported by EPC
 * @epc: the features supported by *this* EPC device will be returned
 * @func_no: the features supported by the EPC device specific to the
 *	     endpoint function with func_no will be returned
 * @vfunc_no: the features supported by the EPC device specific to the
 *	      virtual endpoint function with vfunc_no will be returned
 *
 * Invoke to get the features provided by the EPC which may be
 * specific to an endpoint function. Returns pci_epc_features on success
 * and NULL for any failures.
 */
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
						    u8 func_no, u8 vfunc_no)
{
	const struct pci_epc_features *epc_features;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return NULL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return NULL;

	if (!epc->ops->get_features)
		return NULL;

	mutex_lock(&epc->lock);
	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	return epc_features;
}
EXPORT_SYMBOL_GPL(pci_epc_get_features);
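
/*
 * Example (illustrative sketch): an endpoint function driver would usually
 * query the controller's features before claiming a BAR, so that BARs the
 * controller has reserved are skipped. @epc and @epf are assumed to be the
 * bound controller and function.
 *
 *	const struct pci_epc_features *epc_features;
 *	enum pci_barno bar;
 *
 *	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
 *	if (!epc_features)
 *		return -EOPNOTSUPP;
 *
 *	bar = pci_epc_get_first_free_bar(epc_features);
 *	if (bar == NO_BAR)
 *		return -EINVAL;
 */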

/**
 * pci_epc_stop() - stop the PCI link
 * @epc: the EPC device whose link has to be stopped
 *
 * Invoke to stop the PCI link.
 */
void pci_epc_stop(struct pci_epc *epc)
{
	if (IS_ERR(epc) || !epc->ops->stop)
		return;

	mutex_lock(&epc->lock);
	epc->ops->stop(epc);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);

/**
 * pci_epc_start() - start the PCI link
 * @epc: the EPC device whose link has to be started
 *
 * Invoke to start the PCI link.
 */
int pci_epc_start(struct pci_epc *epc)
{
	int ret;

	if (IS_ERR(epc))
		return -EINVAL;

	if (!epc->ops->start)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->start(epc);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_start);

/**
 * pci_epc_raise_irq() - interrupt the host system
 * @epc: the EPC device which has to interrupt the host
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @type: specify the type of interrupt; INTX, MSI or MSI-X
 * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
 *
 * Invoke to raise an INTX, MSI or MSI-X interrupt.
 */
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		      unsigned int type, u16 interrupt_num)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->raise_irq)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);

/**
 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
 *			   MSI data
 * @epc: the EPC device which has the MSI capability
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: the physical address of the outbound region
 * @interrupt_num: the MSI interrupt number with range (1-N)
 * @entry_size: size of the outbound address region for each interrupt
 * @msi_data: the data that should be written in order to raise MSI interrupt
 *	      with interrupt number as 'interrupt_num'
 * @msi_addr_offset: offset of the MSI address from the aligned outbound
 *		     address to which the MSI address is mapped
 *
 * Invoke to map a physical address to the MSI address and return the MSI
 * data. The physical address should be an address in the outbound region.
 * This is required to implement doorbell functionality of NTB wherein EPC on
 * either side of the interface (primary and secondary) can directly write to
 * the physical address (in outbound region) of the other interface to ring
 * doorbell.
 */
int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
			u32 *msi_data, u32 *msi_addr_offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->map_msi_irq)
		return -EINVAL;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
				    interrupt_num, entry_size, msi_data,
				    msi_addr_offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
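
/*
 * Example (illustrative sketch): once the link is up, an endpoint function
 * driver can signal the host, assuming the PCI_IRQ_MSI interrupt-type
 * constant used elsewhere in the endpoint framework. Vector numbering
 * starts at 1.
 *
 *	int ret;
 *
 *	ret = pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
 *				PCI_IRQ_MSI, 1);
 *	if (ret)
 *		dev_err(&epf->dev, "failed to raise MSI to host\n");
 */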

/**
 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
 * @epc: the EPC device to which MSI interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI interrupts allocated by the RC.
 */
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return 0;

	if (!epc->ops->get_msi)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	interrupt = 1 << interrupt;

	return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);

/**
 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
 * @epc: the EPC device on which MSI has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI interrupts required by the EPF
 *
 * Invoke to set the required number of MSI interrupts.
 */
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
	int ret;
	u8 encode_int;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 32)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_msi)
		return 0;

	encode_int = order_base_2(interrupts);

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msi);

/**
 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
 * @epc: the EPC device to which MSI-X interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI-X interrupts allocated by the RC.
 */
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return 0;

	if (!epc->ops->get_msix)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	return interrupt + 1;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);

/**
 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
 * @epc: the EPC device on which MSI-X has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI-X interrupts required by the EPF
 * @bir: BAR where the MSI-X table resides
 * @offset: offset pointing to the start of MSI-X table
 *
 * Invoke to set the required number of MSI-X interrupts.
 */
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     u16 interrupts, enum pci_barno bir, u32 offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 2048)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_msix)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
				 offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msix);
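
/*
 * Example (illustrative sketch): the MSI capability encodes the vector count
 * as a power of two, which is why pci_epc_set_msi() stores
 * order_base_2(@interrupts) and pci_epc_get_msi() returns 1 << (value read
 * back). A request for 16 vectors:
 *
 *	int ret, nr_irqs;
 *
 *	ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, 16);
 *	if (ret)
 *		return ret;
 *
 *	// After the host has configured the function, this reports how many
 *	// vectors were actually enabled (0 if MSI is not available).
 *	nr_irqs = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
 */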

/**
 * pci_epc_unmap_addr() - unmap CPU address from PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 *
 * Invoke to unmap the CPU address from PCI address.
 */
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return;

	if (!epc->ops->unmap_addr)
		return;

	mutex_lock(&epc->lock);
	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);

/**
 * pci_epc_map_addr() - map CPU address to PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 * @pci_addr: PCI address to which the physical address should be mapped
 * @size: the size of the allocation
 *
 * Invoke to map CPU address with PCI address.
 */
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->map_addr)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
				 size);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_addr);

/**
 * pci_epc_clear_bar() - reset the BAR
 * @epc: the EPC device for which the BAR has to be cleared
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to reset the BAR of the endpoint device.
 */
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epf_bar *epf_bar)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
		return;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return;

	if (!epc->ops->clear_bar)
		return;

	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
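
/*
 * Example (illustrative sketch): mapping a host (PCI) address so the local
 * CPU can access it. The outbound window is assumed to come from the
 * pci_epc_mem_alloc_addr()/pci_epc_mem_free_addr() helpers declared in
 * <linux/pci-epc.h>; 'pci_addr' is a host address the function learned
 * through some function-specific means (e.g. a register in a BAR).
 *
 *	void __iomem *addr;
 *	phys_addr_t phys_addr;
 *	int ret;
 *
 *	addr = pci_epc_mem_alloc_addr(epc, &phys_addr, SZ_64K);
 *	if (!addr)
 *		return -ENOMEM;
 *
 *	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no,
 *			       phys_addr, pci_addr, SZ_64K);
 *	if (ret)
 *		goto free_addr;
 *
 *	// ... readl()/writel() on 'addr' now reach 'pci_addr' on the host ...
 *
 *	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
 * free_addr:
 *	pci_epc_mem_free_addr(epc, phys_addr, addr, SZ_64K);
 */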

/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	int ret;
	int flags = epf_bar->flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);

/**
 * pci_epc_write_header() - write standard configuration header
 * @epc: the EPC device to which the configuration header should be written
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @header: standard configuration header fields
 *
 * Invoke to write the configuration header to the endpoint controller. Every
 * endpoint controller will have a dedicated location to which the standard
 * configuration header would be written. The callback function should write
 * the header fields to this dedicated location.
 */
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			 struct pci_epf_header *header)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	/* Only Virtual Function #1 has deviceID */
	if (vfunc_no > 1)
		return -EINVAL;

	if (!epc->ops->write_header)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_write_header);
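
/*
 * Example (illustrative sketch): an EPF driver typically lets
 * pci_epf_alloc_space() (from <linux/pci-epf.h>) fill in epf->bar[bar]
 * (phys_addr, size, flags) and then hands that descriptor to the controller.
 * pci_epc_clear_bar() is the teardown counterpart.
 *
 *	struct pci_epf_bar *epf_bar = &epf->bar[bar];
 *	int ret;
 *
 *	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
 *	if (ret)
 *		return ret;
 */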

/**
 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
 * @epc: the EPC device to which the endpoint function should be added
 * @epf: the endpoint function to be added
 * @type: Identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * A PCI endpoint device can have one or more functions. In the case of PCIe,
 * the specification allows up to 8 PCIe endpoint functions. Invoke
 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
 */
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
		    enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no;
	int ret = 0;

	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
		return -EINVAL;

	if (type == PRIMARY_INTERFACE && epf->epc)
		return -EBUSY;

	if (type == SECONDARY_INTERFACE && epf->sec_epc)
		return -EBUSY;

	mutex_lock(&epc->list_lock);
	func_no = find_first_zero_bit(&epc->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		ret = -EINVAL;
		goto ret;
	}

	if (func_no > epc->max_functions - 1) {
		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
		ret = -EINVAL;
		goto ret;
	}

	set_bit(func_no, &epc->function_num_map);
	if (type == PRIMARY_INTERFACE) {
		epf->func_no = func_no;
		epf->epc = epc;
		list = &epf->list;
	} else {
		epf->sec_epc_func_no = func_no;
		epf->sec_epc = epc;
		list = &epf->sec_epc_list;
	}

	list_add_tail(list, &epc->pci_epf);
ret:
	mutex_unlock(&epc->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);

/**
 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
 * @epc: the EPC device from which the endpoint function should be removed
 * @epf: the endpoint function to be removed
 * @type: identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * Invoke to remove PCI endpoint function from the endpoint controller.
 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
			enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no = 0;

	if (IS_ERR_OR_NULL(epc) || !epf)
		return;

	if (type == PRIMARY_INTERFACE) {
		func_no = epf->func_no;
		list = &epf->list;
	} else {
		func_no = epf->sec_epc_func_no;
		list = &epf->sec_epc_list;
	}

	mutex_lock(&epc->list_lock);
	clear_bit(func_no, &epc->function_num_map);
	list_del(list);
	/* Clear the EPC pointer of the interface that was bound above */
	if (type == PRIMARY_INTERFACE)
		epf->epc = NULL;
	else
		epf->sec_epc = NULL;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);

/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 *		      connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
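
/*
 * Example (illustrative sketch): the notifications in this file are delivered
 * through the event callbacks an EPF driver registers on its pci_epf, assumed
 * here to be a struct pci_epf_event_ops from <linux/pci-epf.h>. The callback
 * names and the wiring in the driver's probe are hypothetical.
 *
 *	static const struct pci_epf_event_ops my_epf_event_ops = {
 *		.core_init	= my_epf_core_init,
 *		.link_up	= my_epf_link_up,
 *	};
 *
 *	// in the EPF driver's probe:
 *	epf->event_ops = &my_epf_event_ops;
 */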

/**
 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
 *			connection with the Root Complex.
 * @epc: the EPC device which has dropped the link with the host
 *
 * Invoke to notify the EPF device that the EPC device has dropped the
 * connection with the Root Complex.
 */
void pci_epc_linkdown(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_down)
			epf->event_ops->link_down(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkdown);

/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device's core
 *			   initialization is completed.
 * @epc: the EPC device whose core initialization is completed
 *
 * Invoke to notify the EPF device that the EPC device's initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->core_init)
			epf->event_ops->core_init(epf);
		mutex_unlock(&epf->lock);
	}
	epc->init_complete = true;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);

/**
 * pci_epc_notify_pending_init() - Notify the EPF device of a pending EPC
 *				   initialization complete event
 * @epc: the EPC device whose core initialization is pending to be notified
 * @epf: the EPF device to be notified
 *
 * Invoke to notify the EPF device that the EPC device's initialization has
 * already completed. This is used to deliver the notification if the EPC
 * initialization completed before the EPF driver bind.
 */
void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
{
	if (epc->init_complete) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->core_init)
			epf->event_ops->core_init(epf);
		mutex_unlock(&epf->lock);
	}
}
EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);

/**
 * pci_epc_bme_notify() - Notify the EPF device that the EPC device has
 *			  received the BME event from the Root Complex
 * @epc: the EPC device that received the BME event
 *
 * Invoke to notify the EPF device that the EPC device has received the Bus
 * Master Enable (BME) event from the Root Complex.
 */
void pci_epc_bme_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->bme)
			epf->event_ops->bme(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_bme_notify);

/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device.
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	pci_ep_cfs_remove_epc_group(epc->group);
	device_unregister(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);

/**
 * devm_pci_epc_destroy() - destroy the EPC device
 * @dev: device that wants to destroy the EPC
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the devres associated with this
 * pci_epc and destroy the EPC device.
 */
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
{
	int r;

	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
			   epc);
	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
}
EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);

static void pci_epc_release(struct device *dev)
{
	kfree(to_pci_epc(dev));
}

/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	device_initialize(&epc->dev);
	epc->dev.class = pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	epc->ops = ops;

	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);

/**
 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 * While at that, it also associates the device with the pci_epc using devres.
 * On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		      struct module *owner)
{
	struct pci_epc **ptr, *epc;

	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	epc = __pci_epc_create(dev, ops, owner);
	if (!IS_ERR(epc)) {
		*ptr = epc;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return epc;
}
EXPORT_SYMBOL_GPL(__devm_pci_epc_create);

static int __init pci_epc_init(void)
{
	pci_epc_class = class_create("pci_epc");
	if (IS_ERR(pci_epc_class)) {
		pr_err("failed to create pci epc class --> %ld\n",
		       PTR_ERR(pci_epc_class));
		return PTR_ERR(pci_epc_class);
	}

	return 0;
}
module_init(pci_epc_init);

static void __exit pci_epc_exit(void)
{
	class_destroy(pci_epc_class);
}
module_exit(pci_epc_exit);

MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
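
/*
 * Example (illustrative sketch): an endpoint controller driver normally goes
 * through the devm_pci_epc_create() wrapper assumed to be provided by
 * <linux/pci-epc.h>, which feeds THIS_MODULE into __devm_pci_epc_create().
 * The ops implementations and the probe function below are hypothetical; the
 * pci_epc_ops member names match the callbacks invoked by this file.
 *
 *	static const struct pci_epc_ops my_epc_ops = {
 *		.write_header	= my_epc_write_header,
 *		.set_bar	= my_epc_set_bar,
 *		.raise_irq	= my_epc_raise_irq,
 *		.start		= my_epc_start,
 *	};
 *
 *	static int my_epc_probe(struct platform_device *pdev)
 *	{
 *		struct pci_epc *epc;
 *
 *		epc = devm_pci_epc_create(&pdev->dev, &my_epc_ops);
 *		if (IS_ERR(epc))
 *			return PTR_ERR(epc);
 *		...
 *	}
 */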