// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include "xe_assert.h"
#include "xe_dep_job_types.h"
#include "xe_dep_scheduler.h"
#include "xe_exec_queue.h"
#include "xe_gt_types.h"
#include "xe_tlb_inval.h"
#include "xe_tlb_inval_job.h"
#include "xe_migrate.h"
#include "xe_pm.h"

/** struct xe_tlb_inval_job - TLB invalidation job */
struct xe_tlb_inval_job {
	/** @dep: base generic dependency Xe job */
	struct xe_dep_job dep;
	/** @tlb_inval: TLB invalidation client */
	struct xe_tlb_inval *tlb_inval;
	/** @q: exec queue issuing the invalidate */
	struct xe_exec_queue *q;
	/** @refcount: ref count of this job */
	struct kref refcount;
	/**
	 * @fence: dma fence to indicate completion. One-way relationship: the
	 * job can safely reference the fence, but the fence cannot safely
	 * reference the job.
	 */
	struct dma_fence *fence;
	/** @start: Start address to invalidate */
	u64 start;
	/** @end: End address to invalidate */
	u64 end;
	/** @asid: Address space ID to invalidate */
	u32 asid;
	/** @fence_armed: Fence has been armed */
	bool fence_armed;
};

static struct dma_fence *xe_tlb_inval_job_run(struct xe_dep_job *dep_job)
{
	struct xe_tlb_inval_job *job =
		container_of(dep_job, typeof(*job), dep);
	struct xe_tlb_inval_fence *ifence =
		container_of(job->fence, typeof(*ifence), base);

	xe_tlb_inval_range(job->tlb_inval, ifence, job->start,
			   job->end, job->asid);

	return job->fence;
}

static void xe_tlb_inval_job_free(struct xe_dep_job *dep_job)
{
	struct xe_tlb_inval_job *job =
		container_of(dep_job, typeof(*job), dep);

	/* Pairs with get in xe_tlb_inval_job_push */
	xe_tlb_inval_job_put(job);
}

static const struct xe_dep_job_ops dep_job_ops = {
	.run_job = xe_tlb_inval_job_run,
	.free_job = xe_tlb_inval_job_free,
};

/**
 * xe_tlb_inval_job_create() - TLB invalidation job create
 * @q: exec queue issuing the invalidate
 * @tlb_inval: TLB invalidation client
 * @dep_scheduler: Dependency scheduler for job
 * @start: Start address to invalidate
 * @end: End address to invalidate
 * @asid: Address space ID to invalidate
 *
 * Create a TLB invalidation job and initialize internal fields. The caller is
 * responsible for releasing the creation reference.
 *
 * Return: TLB invalidation job object on success, ERR_PTR on failure
 */
struct xe_tlb_inval_job *
xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
			struct xe_dep_scheduler *dep_scheduler, u64 start,
			u64 end, u32 asid)
{
	struct xe_tlb_inval_job *job;
	struct drm_sched_entity *entity =
		xe_dep_scheduler_entity(dep_scheduler);
	struct xe_tlb_inval_fence *ifence;
	int err;

	job = kmalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return ERR_PTR(-ENOMEM);

	job->q = q;
	job->tlb_inval = tlb_inval;
	job->start = start;
	job->end = end;
	job->asid = asid;
	job->fence_armed = false;
	job->dep.ops = &dep_job_ops;
	kref_init(&job->refcount);
	xe_exec_queue_get(q);	/* Pairs with put in xe_tlb_inval_job_destroy */

	ifence = kmalloc(sizeof(*ifence), GFP_KERNEL);
	if (!ifence) {
		err = -ENOMEM;
		goto err_job;
	}
	job->fence = &ifence->base;

	err = drm_sched_job_init(&job->dep.drm, entity, 1, NULL,
				 q->xef ? q->xef->drm->client_id : 0);
	if (err)
		goto err_fence;

	/* Pairs with put in xe_tlb_inval_job_destroy */
	xe_pm_runtime_get_noresume(gt_to_xe(q->gt));

	return job;

err_fence:
	kfree(ifence);
err_job:
	xe_exec_queue_put(q);
	kfree(job);

	return ERR_PTR(err);
}

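/*
 * Usage sketch (illustrative only, not part of the driver): the creation
 * reference returned by xe_tlb_inval_job_create() must always be dropped
 * by the caller, whether or not the job ends up being pushed:
 *
 *	job = xe_tlb_inval_job_create(q, tlb_inval, dep_scheduler,
 *				      start, end, asid);
 *	if (IS_ERR(job))
 *		return PTR_ERR(job);
 *	...
 *	xe_tlb_inval_job_put(job);	// drop creation reference
 */
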
static void xe_tlb_inval_job_destroy(struct kref *ref)
{
	struct xe_tlb_inval_job *job = container_of(ref, typeof(*job),
						    refcount);
	struct xe_tlb_inval_fence *ifence =
		container_of(job->fence, typeof(*ifence), base);
	struct xe_exec_queue *q = job->q;
	struct xe_device *xe = gt_to_xe(q->gt);

	if (!job->fence_armed)
		kfree(ifence);
	else
		/* Ref from xe_tlb_inval_fence_init */
		dma_fence_put(job->fence);

	drm_sched_job_cleanup(&job->dep.drm);
	kfree(job);
	xe_exec_queue_put(q);	/* Pairs with get from xe_tlb_inval_job_create */
	xe_pm_runtime_put(xe);	/* Pairs with get from xe_tlb_inval_job_create */
}

/**
 * xe_tlb_inval_job_alloc_dep() - TLB invalidation job alloc dependency
 * @job: TLB invalidation job to alloc dependency for
 *
 * Allocate storage for a dependency in the TLB invalidation job. This
 * function should be called at most once per job and must be paired with a
 * call to xe_tlb_inval_job_push() with a real fence.
 *
 * Return: 0 on success, -errno on failure
 */
int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job)
{
	xe_assert(gt_to_xe(job->q->gt), !xa_load(&job->dep.drm.dependencies, 0));
	might_alloc(GFP_KERNEL);

	return drm_sched_job_add_dependency(&job->dep.drm,
					    dma_fence_get_stub());
}

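/*
 * Preallocation sketch (illustrative only): xe_tlb_inval_job_push() may be
 * called from the reclaim path where allocations are not allowed, so the
 * dependency slot is reserved up front with a stub fence. A caller that
 * will later pass an unsignaled fence to the push would do something like
 * the following, outside of reclaim:
 *
 *	err = xe_tlb_inval_job_alloc_dep(job);
 *	if (err) {
 *		xe_tlb_inval_job_put(job);
 *		return err;
 *	}
 */
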
/**
 * xe_tlb_inval_job_push() - TLB invalidation job push
 * @job: TLB invalidation job to push
 * @m: The migration object being used
 * @fence: Dependency for TLB invalidation job
 *
 * Pushes a TLB invalidation job for execution, using @fence as a dependency.
 * Storage for @fence must be preallocated with xe_tlb_inval_job_alloc_dep()
 * prior to this call if @fence is not signaled. Takes a reference to the job's
 * finished fence, which the caller is responsible for releasing, and returns
 * it to the caller. This function can safely be called from the reclaim path.
 *
 * Return: The job's finished fence; this function cannot fail
 */
struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,
					struct xe_migrate *m,
					struct dma_fence *fence)
{
	struct xe_tlb_inval_fence *ifence =
		container_of(job->fence, typeof(*ifence), base);

	if (!dma_fence_is_signaled(fence)) {
		void *ptr;

		/*
		 * Can be called in the reclaim path, hence the preallocation
		 * of fence storage in xe_tlb_inval_job_alloc_dep(). Verify
		 * the caller did this correctly.
		 */
		xe_assert(gt_to_xe(job->q->gt),
			  xa_load(&job->dep.drm.dependencies, 0) ==
			  dma_fence_get_stub());

		dma_fence_get(fence);	/* ref released once dependency processed by scheduler */
		ptr = xa_store(&job->dep.drm.dependencies, 0, fence,
			       GFP_ATOMIC);
		xe_assert(gt_to_xe(job->q->gt), !xa_is_err(ptr));
	}

	xe_tlb_inval_job_get(job);	/* Pairs with put in free_job */
	job->fence_armed = true;

	/*
	 * The migration job lock is needed to protect the job's seqno and the
	 * SPSC queue. It is only taken for the migration queue; user queues
	 * are protected by the VM's dma-resv lock.
	 */
	xe_migrate_job_lock(m, job->q);

	/* Creation ref pairs with put in xe_tlb_inval_job_destroy */
	xe_tlb_inval_fence_init(job->tlb_inval, ifence, false);
	dma_fence_get(job->fence);	/* Pairs with put in DRM scheduler */

	drm_sched_job_arm(&job->dep.drm);
	/*
	 * Caller ref; the get must be done before the job is pushed, as the
	 * job could immediately signal and be freed.
	 */
	dma_fence_get(&job->dep.drm.s_fence->finished);
	drm_sched_entity_push_job(&job->dep.drm);

	xe_migrate_job_unlock(m, job->q);

	/*
	 * job->fence is not returned here, as it has its own dma-fence
	 * context, which prevents TLB invalidation fences on the same
	 * (queue, GT) tuple from being squashed in dma-resv / the DRM
	 * scheduler. Instead, the DRM scheduler context and the job's
	 * finished fence are used, which enables squashing.
	 */
	return &job->dep.drm.s_fence->finished;
}

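/*
 * Fence handling sketch (illustrative only): the fence returned by
 * xe_tlb_inval_job_push() is the completion point callers should track or
 * install, and it carries a reference the caller must drop:
 *
 *	fence = xe_tlb_inval_job_push(job, m, dep_fence);
 *	xe_tlb_inval_job_put(job);	// creation reference no longer needed
 *	... install or wait on "fence" ...
 *	dma_fence_put(fence);		// drop the reference taken by push
 */
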
/**
 * xe_tlb_inval_job_get() - Get a reference to TLB invalidation job
 * @job: TLB invalidation job object
 *
 * Increment the TLB invalidation job's reference count
 */
void xe_tlb_inval_job_get(struct xe_tlb_inval_job *job)
{
	kref_get(&job->refcount);
}

/**
 * xe_tlb_inval_job_put() - Put a reference to TLB invalidation job
 * @job: TLB invalidation job object
 *
 * Decrement the TLB invalidation job's reference count and call
 * xe_tlb_inval_job_destroy() when the reference count reaches 0. The
 * decrement is skipped if @job is NULL or an ERR_PTR.
 */
void xe_tlb_inval_job_put(struct xe_tlb_inval_job *job)
{
	if (!IS_ERR_OR_NULL(job))
		kref_put(&job->refcount, xe_tlb_inval_job_destroy);
}
269