// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 */

#include <linux/file.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
#include <asm/xive.h>
#include <misc/ocxl.h>

#include <uapi/misc/cxl.h>

#include "backend.h"
#include "ocxl_hw.h"

/*
 * Pseudo-filesystem to allocate inodes.
 */

#define OCXLFLASH_FS_MAGIC      0x1697698f

static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;

static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type ocxlflash_fs_type = {
	.name		= "ocxlflash",
	.owner		= THIS_MODULE,
	.init_fs_context = ocxlflash_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
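
/*
 * Note: this pseudo-fs exists only to supply anonymous inodes for the
 * per-context file descriptors handed out by ocxlflash_get_fd(). A minimal
 * sketch of the pin/release pairing (names match the statics above); the
 * filesystem is mounted on the first pin and unmounted on the last release:
 *
 *	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
 *			   &ocxlflash_fs_cnt);
 *	...
 *	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
 */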

/*
 * ocxlflash_release_mapping() - release the memory mapping
 * @ctx:	Context whose mapping is to be released.
 */
static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
{
	if (ctx->mapping)
		simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
	ctx->mapping = NULL;
}

/*
 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
 * @dev:	Generic device of the host.
 * @name:	Name for the new file.
 * @fops:	File operations.
 * @priv:	Private data.
 * @flags:	Flags for the file.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags)
{
	struct file *file;
	struct inode *inode;
	int rc;

	if (fops->owner && !try_module_get(fops->owner)) {
		dev_err(dev, "%s: Owner does not exist\n", __func__);
		rc = -ENOENT;
		goto err1;
	}

	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
			   &ocxlflash_fs_cnt);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
			__func__, rc);
		goto err2;
	}

	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: alloc_file failed rc=%d\n",
			__func__, rc);
		goto err4;
	}

	file->private_data = priv;
out:
	return file;
err4:
	iput(inode);
err3:
	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
err2:
	module_put(fops->owner);
err1:
	file = ERR_PTR(rc);
	goto out;
}
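
/*
 * A minimal usage sketch (mirroring ocxlflash_get_fd() below): the caller
 * reserves an fd first and installs it only once nothing else can fail:
 *
 *	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
 *	file = ocxlflash_getfile(dev, name, fops, ctx, O_RDWR | O_CLOEXEC);
 *	if (!IS_ERR(file))
 *		fd_install(fd, file);	(the fd now owns the file reference)
 */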

/**
 * ocxlflash_psa_map() - map the process specific MMIO space
 * @ctx_cookie:	Adapter context for which the mapping needs to be done.
 *
 * Return: MMIO pointer of the mapped region
 */
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return NULL;
	}
	mutex_unlock(&ctx->state_mutex);

	return ioremap(ctx->psn_phys, ctx->psn_size);
}

/**
 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
 * @addr:	MMIO pointer to unmap.
 */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}

/**
 * ocxlflash_process_element() - get process element of the adapter context
 * @ctx_cookie:	Adapter context associated with the process element.
 *
 * Return: process element of the adapter context
 */
static int ocxlflash_process_element(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return ctx->pe;
}

/**
 * afu_map_irq() - map the interrupt of the adapter context
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
		       irq_handler_t handler, void *cookie, char *name)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;
	struct xive_irq_data *xd;
	u32 virq;
	int rc = 0;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		rc = -ENOENT;
		goto out;
	}

	irq = &ctx->irqs[num];
	virq = irq_create_mapping(NULL, irq->hwirq);
	if (unlikely(!virq)) {
		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	rc = request_irq(virq, handler, 0, name, cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
		goto err1;
	}

	xd = irq_get_handler_data(virq);
	if (unlikely(!xd)) {
		dev_err(dev, "%s: Can't get interrupt data\n", __func__);
		rc = -ENXIO;
		goto err2;
	}

	irq->virq = virq;
	irq->vtrig = xd->trig_mmio;
out:
	return rc;
err2:
	free_irq(virq, cookie);
err1:
	irq_dispose_mapping(virq);
	goto out;
}

/**
 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
				 irq_handler_t handler, void *cookie,
				 char *name)
{
	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
}

/**
 * afu_unmap_irq() - unmap the interrupt
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
			  void *cookie)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		return;
	}

	irq = &ctx->irqs[num];

	if (irq_find_mapping(NULL, irq->hwirq)) {
		free_irq(irq->virq, cookie);
		irq_dispose_mapping(irq->virq);
	}

	memset(irq, 0, sizeof(*irq));
}

/**
 * ocxlflash_unmap_afu_irq() - unmap the interrupt
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	return afu_unmap_irq(0, ctx_cookie, num, cookie);
}

/**
 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
 * @ctx_cookie:	Context associated with the interrupt.
 * @irq:	Interrupt number.
 *
 * Return: effective address of the mapped region
 */
static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	if (irq < 0 || irq >= ctx->num_irqs)
		return 0;

	return (__force u64)ctx->irqs[irq].vtrig;
}
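
/*
 * The object handle returned above is the kernel mapping of the XIVE ESB
 * trigger page for the interrupt: an MMIO store to that page raises the
 * interrupt. A hedged sketch, assuming the context is STARTED and the
 * handle is non-zero:
 *
 *	void __iomem *trig = (void __iomem *)ocxlflash_get_irq_objhndl(ctx, 0);
 *
 *	if (trig)
 *		writeq(0, trig);	(triggers AFU interrupt 0)
 */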

/**
 * ocxlflash_xsl_fault() - callback when translation error is triggered
 * @data:	Private data provided at callback registration, the context.
 * @addr:	Address that triggered the error.
 * @dsisr:	Value of dsisr register.
 */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
	struct ocxlflash_context *ctx = data;

	spin_lock(&ctx->slock);
	ctx->fault_addr = addr;
	ctx->fault_dsisr = dsisr;
	ctx->pending_fault = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
}

/**
 * start_context() - local routine to start a context
 * @ctx:	Adapter context to be started.
 *
 * Assign the context specific MMIO space, add and enable the PE.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_context(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	void *link_token = afu->link_token;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	bool master = ctx->master;
	struct mm_struct *mm;
	int rc = 0;
	u32 pid;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != OPENED) {
		dev_err(dev, "%s: Context state invalid, state=%d\n",
			__func__, ctx->state);
		rc = -EINVAL;
		goto out;
	}

	if (master) {
		ctx->psn_size = acfg->global_mmio_size;
		ctx->psn_phys = afu->gmmio_phys;
	} else {
		ctx->psn_size = acfg->pp_mmio_stride;
		ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
	}

	/* pid and mm not set for master contexts */
	if (master) {
		pid = 0;
		mm = NULL;
	} else {
		pid = current->mm->context.id;
		mm = current->mm;
	}

	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0,
			      pci_dev_id(pdev), mm, ocxlflash_xsl_fault,
			      ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	ctx->state = STARTED;
out:
	mutex_unlock(&ctx->state_mutex);
	return rc;
}
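
/*
 * Context lifecycle sketch, as driven by the cxlflash core through the
 * backend ops at the bottom of this file: ocxlflash_dev_context_init()
 * leaves a context OPENED; start_context() moves it to STARTED (PE added
 * to the OCXL link); ocxlflash_stop_context() moves it to CLOSED (PASID
 * terminated, PE removed); ocxlflash_release_context() frees it.
 */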

/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie:	Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return start_context(ctx);
}

/**
 * ocxlflash_stop_context() - stop a context
 * @ctx_cookie:	Adapter context to be stopped.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_stop_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	enum ocxlflash_ctx_state state;
	int rc = 0;

	mutex_lock(&ctx->state_mutex);
	state = ctx->state;
	ctx->state = CLOSED;
	mutex_unlock(&ctx->state_mutex);
	if (state != STARTED)
		goto out;

	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
					 ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
			__func__, rc);
		/* If EBUSY, PE could be referenced in future by the AFU */
		if (rc == -EBUSY)
			goto out;
	}

	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}
out:
	return rc;
}

/**
 * ocxlflash_afu_reset() - reset the AFU
 * @ctx_cookie:	Adapter context.
 *
 * Return: 0 (always), until reset support arrives in the OCXL transport
 */
static int ocxlflash_afu_reset(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	/* Pending implementation from OCXL transport services */
	dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);

	/* Silently return success until it is implemented */
	return 0;
}

/**
 * ocxlflash_set_master() - sets the context as master
 * @ctx_cookie:	Adapter context to set as master.
 */
static void ocxlflash_set_master(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	ctx->master = true;
}

/**
 * ocxlflash_get_context() - obtains the context associated with the host
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: pointer to the host adapter context
 */
static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	return afu->ocxl_ctx;
}

/**
 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: adapter context on success, ERR_PTR on failure
 */
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	struct device *dev = afu->dev;
	struct ocxlflash_context *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		dev_err(dev, "%s: Context allocation failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	idr_preload(GFP_KERNEL);
	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
	idr_preload_end();
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
		goto err2;
	}

	spin_lock_init(&ctx->slock);
	init_waitqueue_head(&ctx->wq);
	mutex_init(&ctx->state_mutex);

	ctx->state = OPENED;
	ctx->pe = rc;
	ctx->master = false;
	ctx->mapping = NULL;
	ctx->hw_afu = afu;
	ctx->irq_bitmap = 0;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
out:
	return ctx;
err2:
	kfree(ctx);
err1:
	ctx = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_release_context() - releases an adapter context
 * @ctx_cookie:	Adapter context to be released.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_release_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev;
	int rc = 0;

	if (!ctx)
		goto out;

	dev = ctx->hw_afu->dev;
	mutex_lock(&ctx->state_mutex);
	if (ctx->state >= STARTED) {
		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		rc = -EBUSY;
		goto out;
	}
	mutex_unlock(&ctx->state_mutex);

	idr_remove(&ctx->hw_afu->idr, ctx->pe);
	ocxlflash_release_mapping(ctx);
	kfree(ctx);
out:
	return rc;
}

/**
 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
 * @afu_cookie:	Hardware AFU associated with the host.
 * @image:	Whether to load the same image on PERST.
 */
static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	afu->perst_same_image = image;
}

/**
 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
 * @pdev:	PCI device associated with the host.
 * @buf:	Buffer to get the VPD data.
 * @count:	Size of buffer (maximum bytes that can be read).
 *
 * Return: size of VPD on success, -errno on failure
 */
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
					  size_t count)
{
	return pci_read_vpd(pdev, 0, count, buf);
}

/**
 * free_afu_irqs() - internal service to free interrupts
 * @ctx:	Adapter context.
 */
static void free_afu_irqs(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	int i;

	if (!ctx->irqs) {
		dev_err(dev, "%s: Interrupts not allocated\n", __func__);
		return;
	}

	for (i = ctx->num_irqs - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);

	kfree(ctx->irqs);
	ctx->irqs = NULL;
}

/**
 * alloc_afu_irqs() - internal service to allocate interrupts
 * @ctx:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irqs;
	int rc = 0;
	int hwirq;
	int i;

	if (ctx->irqs) {
		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
		rc = -EEXIST;
		goto out;
	}

	if (num > OCXL_MAX_IRQS) {
		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
		rc = -EINVAL;
		goto out;
	}

	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
	if (unlikely(!irqs)) {
		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num; i++) {
		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
		if (unlikely(rc)) {
			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
				__func__, rc);
			goto err;
		}

		irqs[i].hwirq = hwirq;
	}

	ctx->irqs = irqs;
	ctx->num_irqs = num;
out:
	return rc;
err:
	for (i = i-1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
	kfree(irqs);
	goto out;
}
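
/*
 * A minimal sketch of how allocation and mapping pair up (mirroring
 * ocxlflash_start_work() below): alloc_afu_irqs() reserves hwirqs from the
 * OCXL link, then afu_map_irq() wires each one to a Linux virq and handler:
 *
 *	rc = alloc_afu_irqs(ctx, 2);
 *	if (!rc)
 *		rc = afu_map_irq(0, ctx, 0, ocxlflash_afu_irq, ctx, name);
 */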

/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	return alloc_afu_irqs(ctx_cookie, num);
}

/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie:	Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
	free_afu_irqs(ctx_cookie);
}

/**
 * ocxlflash_unconfig_afu() - unconfigure the AFU
 * @afu: AFU associated with the host.
 */
static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
{
	if (afu->gmmio_virt) {
		iounmap(afu->gmmio_virt);
		afu->gmmio_virt = NULL;
	}
}

/**
 * ocxlflash_destroy_afu() - destroy the AFU structure
 * @afu_cookie:	AFU to be freed.
 */
static void ocxlflash_destroy_afu(void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	int pos;

	if (!afu)
		return;

	ocxlflash_release_context(afu->ocxl_ctx);
	idr_destroy(&afu->idr);

	/* Disable the AFU */
	pos = afu->acfg.dvsec_afu_control_pos;
	ocxl_config_set_afu_state(afu->pdev, pos, 0);

	ocxlflash_unconfig_afu(afu);
	kfree(afu);
}

/**
 * ocxlflash_config_fn() - configure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	u16 base, enabled, supported;
	int rc = 0;

	/* Read DVSEC config of the function */
	rc = ocxl_config_read_function(pdev, fcfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Check if function has AFUs defined, only 1 per function supported */
	if (fcfg->max_afu_index >= 0) {
		afu->is_present = true;
		if (fcfg->max_afu_index != 0)
			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
				 __func__, fcfg->max_afu_index);
	}

	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	afu->fn_actag_base = base;
	afu->fn_actag_enabled = enabled;

	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
		__func__, base, enabled);

	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
			__func__, rc);
		goto err;
	}
out:
	return rc;
err:
	ocxl_link_release(pdev, afu->link_token);
	goto out;
}

/**
 * ocxlflash_unconfig_fn() - unconfigure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	ocxl_link_release(pdev, afu->link_token);
}

/**
 * ocxlflash_map_mmio() - map the AFU MMIO space
 * @afu: AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	phys_addr_t gmmio, ppmmio;
	int rc = 0;

	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
			__func__, rc);
		goto out;
	}
	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
	gmmio += acfg->global_mmio_offset;

	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
	ppmmio += acfg->pp_mmio_offset;

	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
	if (unlikely(!afu->gmmio_virt)) {
		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	afu->gmmio_phys = gmmio;
	afu->ppmmio_phys = ppmmio;
out:
	return rc;
err2:
	pci_release_region(pdev, acfg->pp_mmio_bar);
err1:
	pci_release_region(pdev, acfg->global_mmio_bar);
	goto out;
}

/**
 * ocxlflash_config_afu() - configure the host AFU
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Must be called _after_ host function configuration.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	int count;
	int base;
	int pos;
	int rc = 0;

	/* This HW AFU function does not have any AFUs defined */
	if (!afu->is_present)
		goto out;

	/* Read AFU config at index 0 */
	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Only one AFU per function is supported, so actag_base is same */
	base = afu->fn_actag_base;
	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
	pos = acfg->dvsec_afu_control_pos;

	ocxl_config_set_afu_actag(pdev, pos, base, count);
	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
	afu->afu_actag_base = base;
	afu->afu_actag_enabled = count;
	afu->max_pasid = 1 << acfg->pasid_supported_log;

	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);

	rc = ocxlflash_map_mmio(afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Enable the AFU */
	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
out:
	return rc;
}

/**
 * ocxlflash_create_afu() - create the AFU for OCXL
 * @pdev:	PCI device associated with the host.
 *
 * Return: AFU on success, NULL on failure
 */
static void *ocxlflash_create_afu(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocxlflash_context *ctx;
	struct ocxl_hw_afu *afu;
	int rc;

	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
	if (unlikely(!afu)) {
		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
		goto out;
	}

	afu->pdev = pdev;
	afu->dev = dev;
	idr_init(&afu->idr);

	rc = ocxlflash_config_fn(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Function configuration failed rc=%d\n",
			__func__, rc);
		goto err1;
	}

	rc = ocxlflash_config_afu(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx = ocxlflash_dev_context_init(pdev, afu);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	afu->ocxl_ctx = ctx;
out:
	return afu;
err3:
	ocxlflash_unconfig_afu(afu);
err2:
	ocxlflash_unconfig_fn(pdev, afu);
err1:
	idr_destroy(&afu->idr);
	kfree(afu);
	afu = NULL;
	goto out;
}

/**
 * ctx_event_pending() - check for any event pending on the context
 * @ctx:	Context to be checked.
 *
 * Return: true if there is an event pending, false if none pending
 */
static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault)
		return true;

	return false;
}

/**
 * afu_poll() - poll the AFU for events on the context
 * @file:	File associated with the adapter context.
 * @poll:	Poll structure from the user.
 *
 * Return: poll mask
 */
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	ulong lock_flags;
	int mask = 0;

	poll_wait(file, &ctx->wq, poll);

	spin_lock_irqsave(&ctx->slock, lock_flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->state == CLOSED)
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
		__func__, ctx->pe, mask);

	return mask;
}
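
/*
 * A hedged userspace sketch of waiting on the context fd produced by
 * ocxlflash_get_fd() (ctx_fd and ev are illustrative):
 *
 *	struct pollfd pfd = { .fd = ctx_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(ctx_fd, &ev, sizeof(ev));	(see afu_read() below)
 */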

/**
 * afu_read() - perform a read on the context for any event
 * @file:	File associated with the adapter context.
 * @buf:	Buffer to receive the data.
 * @count:	Size of buffer (maximum bytes that can be read).
 * @off:	Offset.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	struct cxl_event event;
	ulong lock_flags;
	ssize_t esize;
	ssize_t rc;
	int bit;
	DEFINE_WAIT(event_wait);

	if (*off != 0) {
		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
			__func__, *off);
		rc = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&ctx->slock, lock_flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);

		if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
			break;

		if (file->f_flags & O_NONBLOCK) {
			dev_err(dev, "%s: Non-blocking fd and no event pending\n",
				__func__);
			rc = -EAGAIN;
			goto err;
		}

		if (signal_pending(current)) {
			dev_err(dev, "%s: Signal pending on the process\n",
				__func__);
			rc = -ERESTARTSYS;
			goto err;
		}

		spin_unlock_irqrestore(&ctx->slock, lock_flags);
		schedule();
		spin_lock_irqsave(&ctx->slock, lock_flags);
	}

	finish_wait(&ctx->wq, &event_wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		esize = sizeof(struct cxl_event_afu_interrupt);
		event.header.size += esize;
		event.header.type = CXL_EVENT_AFU_INTERRUPT;

		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
		clear_bit(bit, &ctx->irq_bitmap);
		event.irq.irq = bit + 1;
		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	}

	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	if (copy_to_user(buf, &event, event.header.size)) {
		dev_err(dev, "%s: copy_to_user failed\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	rc = event.header.size;
out:
	return rc;
err:
	finish_wait(&ctx->wq, &event_wait);
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	goto out;
}
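
/*
 * A hedged userspace sketch of decoding the event stream (struct cxl_event
 * is the uapi type from misc/cxl.h; ctx_fd is illustrative and
 * process_afu_irq() is a hypothetical consumer):
 *
 *	struct cxl_event ev;
 *	ssize_t n = read(ctx_fd, &ev, sizeof(ev));
 *
 *	if (n >= (ssize_t)sizeof(ev.header) &&
 *	    ev.header.type == CXL_EVENT_AFU_INTERRUPT)
 *		process_afu_irq(ev.irq.irq);	(1-based AFU interrupt number)
 */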

/**
 * afu_release() - release and free the context
 * @inode:	File inode pointer.
 * @file:	File associated with the context.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_release(struct inode *inode, struct file *file)
{
	struct ocxlflash_context *ctx = file->private_data;
	int i;

	/* Unmap and free the interrupts associated with the context */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);

	return ocxlflash_release_context(ctx);
}

/**
 * ocxlflash_mmap_fault() - mmap fault handler
 * @vmf:	VM fault associated with current fault.
 *
 * Return: fault status (VM_FAULT_*)
 */
static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxlflash_context *ctx = vma->vm_file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	u64 mmio_area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= ctx->psn_size)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n",
			__func__, ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&ctx->state_mutex);

	mmio_area = ctx->psn_phys;
	mmio_area += offset;

	return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}

static const struct vm_operations_struct ocxlflash_vmops = {
	.fault = ocxlflash_mmap_fault,
};

/**
 * afu_mmap() - map the fault handler operations
 * @file:	File associated with the context.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ocxlflash_context *ctx = file->private_data;

	if ((vma_pages(vma) + vma->vm_pgoff) >
	    (ctx->psn_size >> PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxlflash_vmops;
	return 0;
}
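
/*
 * A hedged userspace sketch of mapping the per-context MMIO space through
 * the fault handler above (ctx_fd and len are illustrative; len must not
 * exceed the context's psn_size):
 *
 *	void *psa = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 ctx_fd, 0);
 */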

static const struct file_operations ocxl_afu_fops = {
	.owner		= THIS_MODULE,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.mmap		= afu_mmap,
};

#define PATCH_FOPS(NAME)						\
	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
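
/*
 * For reference, PATCH_FOPS(poll) expands to the following (behavior
 * preserving sketch of the macro above):
 *
 *	do {
 *		if (!fops->poll)
 *			fops->poll = ocxl_afu_fops.poll;
 *	} while (0)
 */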

/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie:	Adapter context.
 * @fops:	File operations to be associated.
 * @fd:		File descriptor to be returned.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
				     struct file_operations *fops, int *fd)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	struct file *file;
	int flags, fdtmp;
	int rc = 0;
	char *name = NULL;

	/* Only allow one fd per context */
	if (ctx->mapping) {
		dev_err(dev, "%s: Context is already mapped to an fd\n",
			__func__);
		rc = -EEXIST;
		goto err1;
	}

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	fdtmp = rc;

	/* Patch the file ops that are not defined */
	if (fops) {
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(mmap);
	} else /* Use default ops */
		fops = (struct file_operations *)&ocxl_afu_fops;

	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx->mapping = file->f_mapping;
	*fd = fdtmp;
out:
	return file;
err2:
	put_unused_fd(fdtmp);
err1:
	file = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_fops_get_context() - get the context associated with the file
 * @file:	File associated with the adapter context.
 *
 * Return: pointer to the context
 */
static void *ocxlflash_fops_get_context(struct file *file)
{
	return file->private_data;
}

/**
 * ocxlflash_afu_irq() - interrupt handler for user contexts
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the context.
 *
 * Return: always IRQ_HANDLED
 */
static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
{
	struct ocxlflash_context *ctx = data;
	struct device *dev = ctx->hw_afu->dev;
	int i;

	dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
		__func__, ctx->pe, irq);

	for (i = 0; i < ctx->num_irqs; i++) {
		if (ctx->irqs[i].virq == irq)
			break;
	}
	if (unlikely(i >= ctx->num_irqs)) {
		dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
		goto out;
	}

	spin_lock(&ctx->slock);
	/* Bitmap is indexed by the irqs[] slot; afu_read() reports bit + 1 */
	set_bit(i, &ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
out:
	return IRQ_HANDLED;
}

/**
 * ocxlflash_start_work() - start a user context
 * @ctx_cookie:	Context to be started.
 * @num_irqs:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	char *name;
	int rc = 0;
	int i;

	rc = alloc_afu_irqs(ctx, num_irqs);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
		goto out;
	}

	for (i = 0; i < num_irqs; i++) {
		name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
				 dev_name(dev), ctx->pe, i);
		rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
		kfree(name);
		if (unlikely(rc < 0)) {
			dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
				__func__, rc);
			goto err;
		}
	}

	rc = start_context(ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
		goto err;
	}
out:
	return rc;
err:
	for (i = i-1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);
	goto out;
}

/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
	return afu_mmap(file, vma);
}

/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode:	File inode pointer.
 * @file:	File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}

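/*
 * A hedged sketch of how the cxlflash core might drive the table below (the
 * exact call sequence lives in the core driver, not here):
 *
 *	const struct cxlflash_backend_ops *ops = &cxlflash_ocxl_ops;
 *	void *afu = ops->create_afu(pdev);
 *	void *ctx = ops->get_context(pdev, afu);
 *
 *	ops->set_master(ctx);
 *	ops->start_context(ctx);
 */
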
/* Backend ops to ocxlflash services */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
	.module			= THIS_MODULE,
	.psa_map		= ocxlflash_psa_map,
	.psa_unmap		= ocxlflash_psa_unmap,
	.process_element	= ocxlflash_process_element,
	.map_afu_irq		= ocxlflash_map_afu_irq,
	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
	.start_context		= ocxlflash_start_context,
	.stop_context		= ocxlflash_stop_context,
	.afu_reset		= ocxlflash_afu_reset,
	.set_master		= ocxlflash_set_master,
	.get_context		= ocxlflash_get_context,
	.dev_context_init	= ocxlflash_dev_context_init,
	.release_context	= ocxlflash_release_context,
	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
	.free_afu_irqs		= ocxlflash_free_afu_irqs,
	.create_afu		= ocxlflash_create_afu,
	.destroy_afu		= ocxlflash_destroy_afu,
	.get_fd			= ocxlflash_get_fd,
	.fops_get_context	= ocxlflash_fops_get_context,
	.start_work		= ocxlflash_start_work,
	.fd_mmap		= ocxlflash_fd_mmap,
	.fd_release		= ocxlflash_fd_release,
};