// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 */

#include <linux/file.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <misc/ocxl.h>

#include <uapi/misc/cxl.h>

#include "backend.h"
#include "ocxl_hw.h"

/*
 * Pseudo-filesystem to allocate inodes.
 */

#define OCXLFLASH_FS_MAGIC	0x1697698f

static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;

static const struct dentry_operations ocxlflash_fs_dops = {
	.d_dname = simple_dname,
};

/**
 * ocxlflash_fs_mount() - mount the pseudo-filesystem
 * @fs_type: File system type.
 * @flags: Flags for the filesystem.
 * @dev_name: Device name associated with the filesystem.
 * @data: Data pointer.
 *
 * Return: pointer to the directory entry structure
 */
static struct dentry *ocxlflash_fs_mount(struct file_system_type *fs_type,
					 int flags, const char *dev_name,
					 void *data)
{
	return mount_pseudo(fs_type, "ocxlflash:", NULL, &ocxlflash_fs_dops,
			    OCXLFLASH_FS_MAGIC);
}

static struct file_system_type ocxlflash_fs_type = {
	.name		= "ocxlflash",
	.owner		= THIS_MODULE,
	.mount		= ocxlflash_fs_mount,
	.kill_sb	= kill_anon_super,
};

/**
 * ocxlflash_release_mapping() - release the memory mapping
 * @ctx: Context whose mapping is to be released.
 */
static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
{
	if (ctx->mapping)
		simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
	ctx->mapping = NULL;
}

/**
 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
 * @dev: Generic device of the host.
 * @name: Name of the pseudo filesystem.
 * @fops: File operations.
 * @priv: Private data.
 * @flags: Flags for the file.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags)
{
	struct file *file;
	struct inode *inode;
	int rc;

	if (fops->owner && !try_module_get(fops->owner)) {
		dev_err(dev, "%s: Owner does not exist\n", __func__);
		rc = -ENOENT;
		goto err1;
	}

	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
			   &ocxlflash_fs_cnt);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
			__func__, rc);
		goto err2;
	}

	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: alloc_file_pseudo failed rc=%d\n",
			__func__, rc);
		goto err4;
	}

	file->private_data = priv;
out:
	return file;
err4:
	iput(inode);
err3:
	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
err2:
	module_put(fops->owner);
err1:
	file = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_psa_map() - map the process specific MMIO space
 * @ctx_cookie: Adapter context for which the mapping needs to be done.
 *
 * Return: MMIO pointer of the mapped region
 */
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return NULL;
	}
	mutex_unlock(&ctx->state_mutex);

	return ioremap(ctx->psn_phys, ctx->psn_size);
}

/**
 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
 * @addr: MMIO pointer to unmap.
 */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}

/**
 * ocxlflash_process_element() - get process element of the adapter context
 * @ctx_cookie: Adapter context associated with the process element.
 *
 * Return: process element of the adapter context
 */
static int ocxlflash_process_element(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return ctx->pe;
}
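
/*
 * Editorial sketch (not driver code): a backend consumer is expected to
 * start the context before mapping its PSA space, since ocxlflash_psa_map()
 * above refuses any context that is not STARTED:
 *
 *	void __iomem *mmio;
 *
 *	rc = cxlflash_ocxl_ops.start_context(ctx);
 *	mmio = cxlflash_ocxl_ops.psa_map(ctx);	// NULL unless STARTED
 *	if (mmio) {
 *		// ... access the per-process MMIO region ...
 *		cxlflash_ocxl_ops.psa_unmap(mmio);
 *	}
 */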

/**
 * afu_map_irq() - map the interrupt of the adapter context
 * @flags: Flags.
 * @ctx: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @handler: Interrupt handler to register.
 * @cookie: Interrupt handler private data.
 * @name: Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
		       irq_handler_t handler, void *cookie, char *name)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;
	void __iomem *vtrig;
	u32 virq;
	int rc = 0;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		rc = -ENOENT;
		goto out;
	}

	irq = &ctx->irqs[num];
	virq = irq_create_mapping(NULL, irq->hwirq);
	if (unlikely(!virq)) {
		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	rc = request_irq(virq, handler, 0, name, cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
		goto err1;
	}

	vtrig = ioremap(irq->ptrig, PAGE_SIZE);
	if (unlikely(!vtrig)) {
		dev_err(dev, "%s: Trigger page mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	irq->virq = virq;
	irq->vtrig = vtrig;
out:
	return rc;
err2:
	free_irq(virq, cookie);
err1:
	irq_dispose_mapping(virq);
	goto out;
}

/**
 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
 * @ctx_cookie: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @handler: Interrupt handler to register.
 * @cookie: Interrupt handler private data.
 * @name: Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
				 irq_handler_t handler, void *cookie,
				 char *name)
{
	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
}

/**
 * afu_unmap_irq() - unmap the interrupt
 * @flags: Flags.
 * @ctx: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @cookie: Interrupt handler private data.
 */
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
			  void *cookie)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		return;
	}

	irq = &ctx->irqs[num];
	if (irq->vtrig)
		iounmap(irq->vtrig);

	if (irq_find_mapping(NULL, irq->hwirq)) {
		free_irq(irq->virq, cookie);
		irq_dispose_mapping(irq->virq);
	}

	memset(irq, 0, sizeof(*irq));
}

/**
 * ocxlflash_unmap_afu_irq() - unmap the interrupt
 * @ctx_cookie: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @cookie: Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	return afu_unmap_irq(0, ctx_cookie, num, cookie);
}
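
/*
 * Editorial sketch: interrupt numbers are zero-based indices into the
 * per-context array reserved by alloc_afu_irqs(). Mapping and later
 * unmapping interrupt 0 with a hypothetical handler my_handler:
 *
 *	rc = cxlflash_ocxl_ops.map_afu_irq(ctx, 0, my_handler, my_data,
 *					   "my-irq");
 *	...
 *	cxlflash_ocxl_ops.unmap_afu_irq(ctx, 0, my_data);
 */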

/**
 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
 * @ctx_cookie: Context associated with the interrupt.
 * @irq: Interrupt number.
 *
 * Return: effective address of the mapped region
 */
static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	if (irq < 0 || irq >= ctx->num_irqs)
		return 0;

	return (__force u64)ctx->irqs[irq].vtrig;
}

/**
 * ocxlflash_xsl_fault() - callback when translation error is triggered
 * @data: Private data provided at callback registration, the context.
 * @addr: Address that triggered the error.
 * @dsisr: Value of dsisr register.
 */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
	struct ocxlflash_context *ctx = data;

	spin_lock(&ctx->slock);
	ctx->fault_addr = addr;
	ctx->fault_dsisr = dsisr;
	ctx->pending_fault = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
}

/**
 * start_context() - local routine to start a context
 * @ctx: Adapter context to be started.
 *
 * Assign the context specific MMIO space, add and enable the PE.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_context(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	void *link_token = afu->link_token;
	struct device *dev = afu->dev;
	bool master = ctx->master;
	struct mm_struct *mm;
	int rc = 0;
	u32 pid;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != OPENED) {
		dev_err(dev, "%s: Context state invalid, state=%d\n",
			__func__, ctx->state);
		rc = -EINVAL;
		goto out;
	}

	if (master) {
		ctx->psn_size = acfg->global_mmio_size;
		ctx->psn_phys = afu->gmmio_phys;
	} else {
		ctx->psn_size = acfg->pp_mmio_stride;
		ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
	}

	/* pid and mm not set for master contexts */
	if (master) {
		pid = 0;
		mm = NULL;
	} else {
		pid = current->mm->context.id;
		mm = current->mm;
	}

	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
			      ocxlflash_xsl_fault, ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	ctx->state = STARTED;
out:
	mutex_unlock(&ctx->state_mutex);
	return rc;
}

/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie: Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return start_context(ctx);
}
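
/*
 * Editorial note on the fault path: start_context() above registers
 * ocxlflash_xsl_fault() with ocxl_link_add_pe() as the translation fault
 * callback for the PE. The callback latches the faulting address and
 * DSISR under ctx->slock and wakes ctx->wq, so the fault surfaces to
 * user space as a CXL_EVENT_DATA_STORAGE event through afu_poll() and
 * afu_read() further below.
 */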

/**
 * ocxlflash_stop_context() - stop a context
 * @ctx_cookie: Adapter context to be stopped.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_stop_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	enum ocxlflash_ctx_state state;
	int rc = 0;

	mutex_lock(&ctx->state_mutex);
	state = ctx->state;
	ctx->state = CLOSED;
	mutex_unlock(&ctx->state_mutex);
	if (state != STARTED)
		goto out;

	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
					 ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
			__func__, rc);
		/* If EBUSY, PE could be referenced in future by the AFU */
		if (rc == -EBUSY)
			goto out;
	}

	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}
out:
	return rc;
}

/**
 * ocxlflash_afu_reset() - reset the AFU
 * @ctx_cookie: Adapter context.
 *
 * Return: 0 on success (reset is currently a stub)
 */
static int ocxlflash_afu_reset(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	/* Pending implementation from OCXL transport services */
	dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);

	/* Silently return success until it is implemented */
	return 0;
}

/**
 * ocxlflash_set_master() - sets the context as master
 * @ctx_cookie: Adapter context to set as master.
 */
static void ocxlflash_set_master(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	ctx->master = true;
}

/**
 * ocxlflash_get_context() - obtains the context associated with the host
 * @pdev: PCI device associated with the host.
 * @afu_cookie: Hardware AFU associated with the host.
 *
 * Return: pointer to the host adapter context
 */
static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	return afu->ocxl_ctx;
}

/**
 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
 * @pdev: PCI device associated with the host.
 * @afu_cookie: Hardware AFU associated with the host.
 *
 * Return: adapter context on success, ERR_PTR on failure
 */
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	struct device *dev = afu->dev;
	struct ocxlflash_context *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		dev_err(dev, "%s: Context allocation failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	idr_preload(GFP_KERNEL);
	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
	idr_preload_end();
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
		goto err2;
	}

	spin_lock_init(&ctx->slock);
	init_waitqueue_head(&ctx->wq);
	mutex_init(&ctx->state_mutex);

	ctx->state = OPENED;
	ctx->pe = rc;
	ctx->master = false;
	ctx->mapping = NULL;
	ctx->hw_afu = afu;
	ctx->irq_bitmap = 0;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
out:
	return ctx;
err2:
	kfree(ctx);
err1:
	ctx = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_release_context() - releases an adapter context
 * @ctx_cookie: Adapter context to be released.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_release_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev;
	int rc = 0;

	if (!ctx)
		goto out;

	dev = ctx->hw_afu->dev;
	mutex_lock(&ctx->state_mutex);
	if (ctx->state >= STARTED) {
		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		rc = -EBUSY;
		goto out;
	}
	mutex_unlock(&ctx->state_mutex);

	idr_remove(&ctx->hw_afu->idr, ctx->pe);
	ocxlflash_release_mapping(ctx);
	kfree(ctx);
out:
	return rc;
}

/**
 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
 * @afu_cookie: Hardware AFU associated with the host.
 * @image: Whether to load the same image on PERST.
 */
static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	afu->perst_same_image = image;
}

/**
 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
 * @pdev: PCI device associated with the host.
 * @buf: Buffer to get the VPD data.
 * @count: Size of buffer (maximum bytes that can be read).
 *
 * Return: size of VPD on success, -errno on failure
 */
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
					  size_t count)
{
	return pci_read_vpd(pdev, 0, count, buf);
}

/**
 * free_afu_irqs() - internal service to free interrupts
 * @ctx: Adapter context.
 */
static void free_afu_irqs(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	int i;

	if (!ctx->irqs) {
		dev_err(dev, "%s: Interrupts not allocated\n", __func__);
		return;
	}

	/* Valid indices run 0..num_irqs-1; free in reverse order */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);

	kfree(ctx->irqs);
	ctx->irqs = NULL;
}
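
/*
 * Editorial sketch of the expected interrupt setup/teardown ordering,
 * which ocxlflash_start_work() below follows for user contexts:
 *
 *	rc = alloc_afu_irqs(ctx, num);			// reserve hwirqs
 *	for (i = 0; i < num; i++)
 *		rc = afu_map_irq(0, ctx, i, handler, data, name);
 *	...
 *	for (i = num - 1; i >= 0; i--)
 *		afu_unmap_irq(0, ctx, i, data);		// reverse of map
 *	free_afu_irqs(ctx);
 */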

/**
 * alloc_afu_irqs() - internal service to allocate interrupts
 * @ctx: Context associated with the request.
 * @num: Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irqs;
	u64 addr;
	int rc = 0;
	int hwirq;
	int i;

	if (ctx->irqs) {
		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
		rc = -EEXIST;
		goto out;
	}

	if (num > OCXL_MAX_IRQS) {
		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
		rc = -EINVAL;
		goto out;
	}

	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
	if (unlikely(!irqs)) {
		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num; i++) {
		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq, &addr);
		if (unlikely(rc)) {
			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
				__func__, rc);
			goto err;
		}

		irqs[i].hwirq = hwirq;
		irqs[i].ptrig = addr;
	}

	ctx->irqs = irqs;
	ctx->num_irqs = num;
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
	kfree(irqs);
	goto out;
}

/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie: Context associated with the request.
 * @num: Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	return alloc_afu_irqs(ctx_cookie, num);
}

/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie: Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
	free_afu_irqs(ctx_cookie);
}

/**
 * ocxlflash_unconfig_afu() - unconfigure the AFU
 * @afu: AFU associated with the host.
 */
static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
{
	if (afu->gmmio_virt) {
		iounmap(afu->gmmio_virt);
		afu->gmmio_virt = NULL;
	}
}

/**
 * ocxlflash_destroy_afu() - destroy the AFU structure
 * @afu_cookie: AFU to be freed.
 */
static void ocxlflash_destroy_afu(void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	int pos;

	if (!afu)
		return;

	ocxlflash_release_context(afu->ocxl_ctx);
	idr_destroy(&afu->idr);

	/* Disable the AFU */
	pos = afu->acfg.dvsec_afu_control_pos;
	ocxl_config_set_afu_state(afu->pdev, pos, 0);

	ocxlflash_unconfig_afu(afu);
	kfree(afu);
}
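
/*
 * Editorial sketch of the probe/remove pairing: ocxlflash_create_afu()
 * below builds and enables the AFU, and ocxlflash_destroy_afu() above
 * unwinds it. A consumer would pair them as:
 *
 *	afu = cxlflash_ocxl_ops.create_afu(pdev);
 *	if (!afu)
 *		return -ENOMEM;
 *	...
 *	cxlflash_ocxl_ops.destroy_afu(afu);
 */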

/**
 * ocxlflash_config_fn() - configure the host function
 * @pdev: PCI device associated with the host.
 * @afu: AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	u16 base, enabled, supported;
	int rc = 0;

	/* Read DVSEC config of the function */
	rc = ocxl_config_read_function(pdev, fcfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Check if function has AFUs defined, only 1 per function supported */
	if (fcfg->max_afu_index >= 0) {
		afu->is_present = true;
		if (fcfg->max_afu_index != 0)
			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
				 __func__, fcfg->max_afu_index);
	}

	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	afu->fn_actag_base = base;
	afu->fn_actag_enabled = enabled;

	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
		__func__, base, enabled);

	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
			__func__, rc);
		goto err;
	}
out:
	return rc;
err:
	ocxl_link_release(pdev, afu->link_token);
	goto out;
}

/**
 * ocxlflash_unconfig_fn() - unconfigure the host function
 * @pdev: PCI device associated with the host.
 * @afu: AFU associated with the host.
 */
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	ocxl_link_release(pdev, afu->link_token);
}

/**
 * ocxlflash_map_mmio() - map the AFU MMIO space
 * @afu: AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	phys_addr_t gmmio, ppmmio;
	int rc = 0;

	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
			__func__, rc);
		goto out;
	}
	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
	gmmio += acfg->global_mmio_offset;

	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
	ppmmio += acfg->pp_mmio_offset;

	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
	if (unlikely(!afu->gmmio_virt)) {
		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	afu->gmmio_phys = gmmio;
	afu->ppmmio_phys = ppmmio;
out:
	return rc;
err2:
	pci_release_region(pdev, acfg->pp_mmio_bar);
err1:
	pci_release_region(pdev, acfg->global_mmio_bar);
	goto out;
}
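
/*
 * Editorial note: gmmio_phys and ppmmio_phys cached above feed the
 * psn_phys computation in start_context(); a master context maps the
 * global MMIO region, while a user context maps its slice of the
 * per-process BAR at pe * pp_mmio_stride.
 */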

/**
 * ocxlflash_config_afu() - configure the host AFU
 * @pdev: PCI device associated with the host.
 * @afu: AFU associated with the host.
 *
 * Must be called _after_ host function configuration.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	int count;
	int base;
	int pos;
	int rc = 0;

	/* This HW AFU function does not have any AFUs defined */
	if (!afu->is_present)
		goto out;

	/* Read AFU config at index 0 */
	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Only one AFU per function is supported, so actag_base is same */
	base = afu->fn_actag_base;
	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
	pos = acfg->dvsec_afu_control_pos;

	ocxl_config_set_afu_actag(pdev, pos, base, count);
	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
	afu->afu_actag_base = base;
	afu->afu_actag_enabled = count;
	afu->max_pasid = 1 << acfg->pasid_supported_log;

	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);

	rc = ocxlflash_map_mmio(afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Enable the AFU */
	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
out:
	return rc;
}

/**
 * ocxlflash_create_afu() - create the AFU for OCXL
 * @pdev: PCI device associated with the host.
 *
 * Return: AFU on success, NULL on failure
 */
static void *ocxlflash_create_afu(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocxlflash_context *ctx;
	struct ocxl_hw_afu *afu;
	int rc;

	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
	if (unlikely(!afu)) {
		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
		goto out;
	}

	afu->pdev = pdev;
	afu->dev = dev;
	idr_init(&afu->idr);

	rc = ocxlflash_config_fn(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Function configuration failed rc=%d\n",
			__func__, rc);
		goto err1;
	}

	rc = ocxlflash_config_afu(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx = ocxlflash_dev_context_init(pdev, afu);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	afu->ocxl_ctx = ctx;
out:
	return afu;
err3:
	ocxlflash_unconfig_afu(afu);
err2:
	ocxlflash_unconfig_fn(pdev, afu);
err1:
	idr_destroy(&afu->idr);
	kfree(afu);
	afu = NULL;
	goto out;
}

/**
 * ctx_event_pending() - check for any event pending on the context
 * @ctx: Context to be checked.
 *
 * Return: true if there is an event pending, false if none pending
 */
static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault)
		return true;

	return false;
}

/**
 * afu_poll() - poll the AFU for events on the context
 * @file: File associated with the adapter context.
 * @poll: Poll structure from the user.
 *
 * Return: poll mask
 */
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	ulong lock_flags;
	int mask = 0;

	poll_wait(file, &ctx->wq, poll);

	spin_lock_irqsave(&ctx->slock, lock_flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->state == CLOSED)
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
		__func__, ctx->pe, mask);

	return mask;
}

/**
 * afu_read() - perform a read on the context for any event
 * @file: File associated with the adapter context.
 * @buf: Buffer to receive the data.
 * @count: Size of buffer (maximum bytes that can be read).
 * @off: Offset.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	struct cxl_event event;
	ulong lock_flags;
	ssize_t esize;
	ssize_t rc;
	int bit;
	DEFINE_WAIT(event_wait);

	if (*off != 0) {
		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
			__func__, *off);
		rc = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&ctx->slock, lock_flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);

		if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
			break;

		if (file->f_flags & O_NONBLOCK) {
			dev_err(dev, "%s: File cannot be blocked on I/O\n",
				__func__);
			rc = -EAGAIN;
			goto err;
		}

		if (signal_pending(current)) {
			dev_err(dev, "%s: Signal pending on the process\n",
				__func__);
			rc = -ERESTARTSYS;
			goto err;
		}

		spin_unlock_irqrestore(&ctx->slock, lock_flags);
		schedule();
		spin_lock_irqsave(&ctx->slock, lock_flags);
	}

	finish_wait(&ctx->wq, &event_wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		esize = sizeof(struct cxl_event_afu_interrupt);
		event.header.size += esize;
		event.header.type = CXL_EVENT_AFU_INTERRUPT;

		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
		clear_bit(bit, &ctx->irq_bitmap);
		event.irq.irq = bit + 1;
		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	}

	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	if (copy_to_user(buf, &event, event.header.size)) {
		dev_err(dev, "%s: copy_to_user failed\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	rc = event.header.size;
out:
	return rc;
err:
	finish_wait(&ctx->wq, &event_wait);
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	goto out;
}
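
/*
 * Editorial sketch of the user-space consumer of afu_poll()/afu_read(),
 * following the cxl event ABI from <uapi/misc/cxl.h> (ctx_fd is the
 * descriptor returned through ocxlflash_get_fd(); handle_irq() is a
 * hypothetical application handler):
 *
 *	struct cxl_event event;
 *	struct pollfd pfd = { .fd = ctx_fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		if (read(ctx_fd, &event, sizeof(event)) <= 0)
 *			break;
 *		if (event.header.type == CXL_EVENT_AFU_INTERRUPT)
 *			handle_irq(event.irq.irq);	// 1-based number
 *	}
 */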

/**
 * afu_release() - release and free the context
 * @inode: File inode pointer.
 * @file: File associated with the context.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_release(struct inode *inode, struct file *file)
{
	struct ocxlflash_context *ctx = file->private_data;
	int i;

	/* Unmap and free the interrupts associated with the context */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);

	return ocxlflash_release_context(ctx);
}

/**
 * ocxlflash_mmap_fault() - mmap fault handler
 * @vmf: VM fault associated with current fault.
 *
 * Return: VM_FAULT_NOPAGE on success, VM_FAULT_SIGBUS on failure
 */
static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxlflash_context *ctx = vma->vm_file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	u64 mmio_area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= ctx->psn_size)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n",
			__func__, ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&ctx->state_mutex);

	mmio_area = ctx->psn_phys;
	mmio_area += offset;

	return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}

static const struct vm_operations_struct ocxlflash_vmops = {
	.fault = ocxlflash_mmap_fault,
};

/**
 * afu_mmap() - map the MMIO space of the adapter context
 * @file: File associated with the context.
 * @vma: VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ocxlflash_context *ctx = file->private_data;

	if ((vma_pages(vma) + vma->vm_pgoff) >
	    (ctx->psn_size >> PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxlflash_vmops;
	return 0;
}

static const struct file_operations ocxl_afu_fops = {
	.owner		= THIS_MODULE,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.mmap		= afu_mmap,
};

#define PATCH_FOPS(NAME)						\
	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
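
/*
 * Editorial sketch of PATCH_FOPS in use: a consumer may hand
 * ocxlflash_get_fd() below a partially filled file_operations; only the
 * NULL entries are patched with the defaults above (my_release is a
 * hypothetical override):
 *
 *	static struct file_operations my_fops = {
 *		.owner	 = THIS_MODULE,
 *		.release = my_release,	// hypothetical custom release
 *	};
 *
 *	file = cxlflash_ocxl_ops.get_fd(ctx, &my_fops, &fd);
 *	// my_fops.poll/read/mmap now reference the ocxl_afu_fops defaults
 */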

/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie: Adapter context.
 * @fops: File operations to be associated.
 * @fd: File descriptor to be returned.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
				     struct file_operations *fops, int *fd)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	struct file *file;
	int flags, fdtmp;
	int rc = 0;
	char *name = NULL;

	/* Only allow one fd per context */
	if (ctx->mapping) {
		dev_err(dev, "%s: Context is already mapped to an fd\n",
			__func__);
		rc = -EEXIST;
		goto err1;
	}

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	fdtmp = rc;

	/* Patch the file ops that are not defined */
	if (fops) {
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(mmap);
	} else /* Use default ops */
		fops = (struct file_operations *)&ocxl_afu_fops;

	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx->mapping = file->f_mapping;
	*fd = fdtmp;
out:
	return file;
err2:
	put_unused_fd(fdtmp);
err1:
	file = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_fops_get_context() - get the context associated with the file
 * @file: File associated with the adapter context.
 *
 * Return: pointer to the context
 */
static void *ocxlflash_fops_get_context(struct file *file)
{
	return file->private_data;
}

/**
 * ocxlflash_afu_irq() - interrupt handler for user contexts
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the context.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
{
	struct ocxlflash_context *ctx = data;
	struct device *dev = ctx->hw_afu->dev;
	int i;

	dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
		__func__, ctx->pe, irq);

	for (i = 0; i < ctx->num_irqs; i++) {
		if (ctx->irqs[i].virq == irq)
			break;
	}
	if (unlikely(i >= ctx->num_irqs)) {
		dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
		goto out;
	}

	spin_lock(&ctx->slock);
	/* Bit indices are zero-based; afu_read() reports them as bit + 1 */
	set_bit(i, &ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
out:
	return IRQ_HANDLED;
}

/**
 * ocxlflash_start_work() - start a user context
 * @ctx_cookie: Context to be started.
 * @num_irqs: Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	char *name;
	int rc = 0;
	int i;

	rc = alloc_afu_irqs(ctx, num_irqs);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
		goto out;
	}

	for (i = 0; i < num_irqs; i++) {
		name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
				 dev_name(dev), ctx->pe, i);
		rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
		kfree(name);
		if (unlikely(rc < 0)) {
			dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
				__func__, rc);
			goto err;
		}
	}

	rc = start_context(ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
		goto err;
	}
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);
	goto out;
}

/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file: File installed with adapter file descriptor.
 * @vma: VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
	return afu_mmap(file, vma);
}

/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode: File inode pointer.
 * @file: File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}

/* Backend ops to ocxlflash services */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
	.module			= THIS_MODULE,
	.psa_map		= ocxlflash_psa_map,
	.psa_unmap		= ocxlflash_psa_unmap,
	.process_element	= ocxlflash_process_element,
	.map_afu_irq		= ocxlflash_map_afu_irq,
	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
	.start_context		= ocxlflash_start_context,
	.stop_context		= ocxlflash_stop_context,
	.afu_reset		= ocxlflash_afu_reset,
	.set_master		= ocxlflash_set_master,
	.get_context		= ocxlflash_get_context,
	.dev_context_init	= ocxlflash_dev_context_init,
	.release_context	= ocxlflash_release_context,
	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
	.free_afu_irqs		= ocxlflash_free_afu_irqs,
	.create_afu		= ocxlflash_create_afu,
	.destroy_afu		= ocxlflash_destroy_afu,
	.get_fd			= ocxlflash_get_fd,
	.fops_get_context	= ocxlflash_fops_get_context,
	.start_work		= ocxlflash_start_work,
	.fd_mmap		= ocxlflash_fd_mmap,
	.fd_release		= ocxlflash_fd_release,
};
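
/*
 * Editorial sketch of a typical consumer lifecycle through the ops table
 * above (error handling elided; superpipe-style user context):
 *
 *	afu  = cxlflash_ocxl_ops.create_afu(pdev);
 *	ctx  = cxlflash_ocxl_ops.dev_context_init(pdev, afu);
 *	rc   = cxlflash_ocxl_ops.start_work(ctx, 4);	// irqs + start
 *	file = cxlflash_ocxl_ops.get_fd(ctx, NULL, &fd);
 *	fd_install(fd, file);				// hand fd to user
 *	...
 *	cxlflash_ocxl_ops.stop_context(ctx);
 *	cxlflash_ocxl_ops.release_context(ctx);
 *	cxlflash_ocxl_ops.destroy_afu(afu);
 */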