// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_preempt_fence.h"

#include <linux/slab.h>

#include "xe_exec_queue.h"
#include "xe_vm.h"

/*
 * Worker that completes the preempt fence: wait for the exec queue
 * suspension triggered in preempt_fence_enable_signaling() to finish
 * (or propagate the suspend error), signal the fence, and kick the
 * VM rebind worker.
 */
static void preempt_fence_work_func(struct work_struct *w)
{
	bool cookie = dma_fence_begin_signalling();
	struct xe_preempt_fence *pfence =
		container_of(w, typeof(*pfence), preempt_work);
	struct xe_exec_queue *q = pfence->q;

	if (pfence->error)
		dma_fence_set_error(&pfence->base, pfence->error);
	else
		q->ops->suspend_wait(q);

	dma_fence_signal(&pfence->base);
	dma_fence_end_signalling(cookie);

	xe_vm_queue_rebind_worker(q->vm);

	xe_exec_queue_put(q);
}

static const char *
preempt_fence_get_driver_name(struct dma_fence *fence)
{
	return "xe";
}

static const char *
preempt_fence_get_timeline_name(struct dma_fence *fence)
{
	return "preempt";
}

/*
 * First-waiter callback: start suspending the exec queue and hand the
 * (potentially sleeping) wait for its completion off to the worker above.
 */
static bool preempt_fence_enable_signaling(struct dma_fence *fence)
{
	struct xe_preempt_fence *pfence =
		container_of(fence, typeof(*pfence), base);
	struct xe_exec_queue *q = pfence->q;

	pfence->error = q->ops->suspend(q);
	queue_work(q->vm->xe->preempt_fence_wq, &pfence->preempt_work);
	return true;
}

static const struct dma_fence_ops preempt_fence_ops = {
	.get_driver_name = preempt_fence_get_driver_name,
	.get_timeline_name = preempt_fence_get_timeline_name,
	.enable_signaling = preempt_fence_enable_signaling,
};

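/*
 * Illustrative sketch, not part of this file: a preempt fence only starts
 * to signal once somebody waits on it.  The first waiter is what invokes
 * preempt_fence_enable_signaling() above and thereby kicks off the exec
 * queue suspension, e.g. (assuming "fence" was returned by
 * xe_preempt_fence_arm()):
 *
 *	long err = dma_fence_wait(fence, false);
 *
 * Any suspend failure propagated by the worker can afterwards be read
 * back with dma_fence_get_status(fence).
 */
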
/**
 * xe_preempt_fence_alloc() - Allocate a preempt fence with minimal
 * initialization
 *
 * Allocate a preempt fence, and initialize its list head and preempt
 * worker. If the allocated preempt_fence has been armed with
 * xe_preempt_fence_arm(), it must be freed using dma_fence_put(). If not,
 * it must be freed using xe_preempt_fence_free().
 *
 * Return: A struct xe_preempt_fence pointer used for calling into
 * xe_preempt_fence_arm() or xe_preempt_fence_free().
 * An error pointer on error.
 */
struct xe_preempt_fence *xe_preempt_fence_alloc(void)
{
	struct xe_preempt_fence *pfence;

	pfence = kmalloc(sizeof(*pfence), GFP_KERNEL);
	if (!pfence)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pfence->link);
	INIT_WORK(&pfence->preempt_work, preempt_fence_work_func);

	return pfence;
}

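/*
 * Illustrative sketch, not part of this file, of the two legal ways to
 * dispose of an allocated fence.  The queue "q", the "ctx"/"seqno"
 * timeline values and the "abort" condition are hypothetical:
 *
 *	struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
 *	struct dma_fence *fence;
 *
 *	if (IS_ERR(pfence))
 *		return PTR_ERR(pfence);
 *
 *	if (abort) {
 *		xe_preempt_fence_free(pfence);	// never armed
 *	} else {
 *		fence = xe_preempt_fence_arm(pfence, q, ctx, seqno);
 *		// armed: release only via dma_fence_put(fence)
 *	}
 */
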
/**
 * xe_preempt_fence_free() - Free a preempt fence allocated using
 * xe_preempt_fence_alloc().
 * @pfence: Pointer obtained from xe_preempt_fence_alloc().
 *
 * Free a preempt fence that has not yet been armed.
 */
void xe_preempt_fence_free(struct xe_preempt_fence *pfence)
{
	list_del(&pfence->link);
	kfree(pfence);
}

/**
 * xe_preempt_fence_arm() - Arm a preempt fence allocated using
 * xe_preempt_fence_alloc().
 * @pfence: The struct xe_preempt_fence pointer returned from
 *          xe_preempt_fence_alloc().
 * @q: The struct xe_exec_queue used for arming.
 * @context: The dma-fence context used for arming.
 * @seqno: The dma-fence seqno used for arming.
 *
 * Inserts the preempt fence into @context's timeline, takes @link off any
 * list, and registers the struct xe_exec_queue as the queue to be preempted.
 *
 * Return: A pointer to a struct dma_fence embedded into the preempt fence.
 * This function doesn't error.
 */
struct dma_fence *
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
		     u64 context, u32 seqno)
{
	list_del_init(&pfence->link);
	pfence->q = xe_exec_queue_get(q);
	dma_fence_init(&pfence->base, &preempt_fence_ops,
		       &q->compute.lock, context, seqno);

	return &pfence->base;
}

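/*
 * Illustrative sketch, not part of this file: @context identifies a
 * caller-owned timeline and would typically come from
 * dma_fence_context_alloc(), with @seqno increasing monotonically on
 * that timeline:
 *
 *	u64 ctx = dma_fence_context_alloc(1);	// once per timeline
 *	u32 seqno = 0;
 *
 *	fence = xe_preempt_fence_arm(pfence, q, ctx, ++seqno);
 */
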
/**
 * xe_preempt_fence_create() - Helper to create and arm a preempt fence.
 * @q: The struct xe_exec_queue used for arming.
 * @context: The dma-fence context used for arming.
 * @seqno: The dma-fence seqno used for arming.
 *
 * Allocates and inserts the preempt fence into @context's timeline,
 * and registers @q as the struct xe_exec_queue to be preempted.
 *
 * Return: A pointer to the resulting struct dma_fence on success. An error
 * pointer on error. In particular, if allocation fails it returns
 * ERR_PTR(-ENOMEM).
 */
struct dma_fence *
xe_preempt_fence_create(struct xe_exec_queue *q,
			u64 context, u32 seqno)
{
	struct xe_preempt_fence *pfence;

	pfence = xe_preempt_fence_alloc();
	if (IS_ERR(pfence))
		return ERR_CAST(pfence);

	return xe_preempt_fence_arm(pfence, q, context, seqno);
}

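/*
 * Illustrative sketch, not part of this file: typical error handling
 * around the helper ("q", "ctx" and "seqno" are hypothetical
 * caller-provided values):
 *
 *	struct dma_fence *fence = xe_preempt_fence_create(q, ctx, seqno);
 *
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *
 *	// use or install the fence, then drop this local reference:
 *	dma_fence_put(fence);
 */
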
/**
 * xe_fence_is_xe_preempt() - Check whether a dma-fence is a xe preempt fence.
 * @fence: The dma-fence to check.
 *
 * Return: True if @fence was created by this driver's preempt-fence code,
 * false otherwise.
 */
bool xe_fence_is_xe_preempt(const struct dma_fence *fence)
{
	return fence->ops == &preempt_fence_ops;
}

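/*
 * Illustrative sketch, not part of this file: picking preempt fences out
 * of a reservation object.  "resv" is a hypothetical struct dma_resv
 * pointer; the usage class is an assumption based on xe installing
 * preempt fences as DMA_RESV_USAGE_BOOKKEEP:
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *
 *	// with resv locked:
 *	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP,
 *				fence) {
 *		if (xe_fence_is_xe_preempt(fence))
 *			;	// found a preempt fence
 *	}
 */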