xref: /linux/drivers/gpu/drm/xe/xe_validation.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  */
5 #include "xe_bo.h"
6 #include <drm/drm_exec.h>
7 #include <drm/drm_gem.h>
8 #include <drm/drm_gpuvm.h>
9 
10 #include "xe_assert.h"
11 #include "xe_validation.h"
12 
13 #ifdef CONFIG_DRM_XE_DEBUG
14 /**
15  * xe_validation_assert_exec() - Assert that the drm_exec pointer is suitable
16  * for validation.
17  * @xe: Pointer to the xe device.
18  * @exec: The drm_exec pointer to check.
19  * @obj: Pointer to the object subject to validation.
20  *
21  * NULL exec pointers are not allowed.
22  * For XE_VALIDATION_UNIMPLEMENTED, no checking.
23  * For XE_VALIDATION_OPT_OUT, check that the caller is a kunit test.
24  * For XE_VALIDATION_UNSUPPORTED, check that the object subject to
25  * validation is a dma-buf, for which support for ww locking is
26  * not in place in the dma-buf layer.
27  */
void xe_validation_assert_exec(const struct xe_device *xe,
			       const struct drm_exec *exec,
			       const struct drm_gem_object *obj)
{
	/* A NULL exec pointer is never valid for validation. */
	xe_assert(xe, exec);
	if (IS_ERR(exec)) {
		/* Error-pointer values encode special validation modes. */
		switch (PTR_ERR(exec)) {
		case __XE_VAL_UNIMPLEMENTED:
			/* Unimplemented: intentionally no checking. */
			break;
		case __XE_VAL_UNSUPPORTED:
			/*
			 * Unsupported is only allowed for dma-buf objects,
			 * where ww locking support isn't in place yet.
			 */
			xe_assert(xe, !!obj->dma_buf);
			break;
#if IS_ENABLED(CONFIG_KUNIT)
		case __XE_VAL_OPT_OUT:
			/* Opting out of validation is restricted to kunit tests. */
			xe_assert(xe, current->kunit_test);
			break;
#endif
		default:
			/* Any other error-pointer value is a bug. */
			xe_assert(xe, false);
		}
	}
}
50 #endif
51 
52 static int xe_validation_lock(struct xe_validation_ctx *ctx)
53 {
54 	struct xe_validation_device *val = ctx->val;
55 	int ret = 0;
56 
57 	if (ctx->val_flags.interruptible) {
58 		if (ctx->request_exclusive)
59 			ret = down_write_killable(&val->lock);
60 		else
61 			ret = down_read_interruptible(&val->lock);
62 	} else {
63 		if (ctx->request_exclusive)
64 			down_write(&val->lock);
65 		else
66 			down_read(&val->lock);
67 	}
68 
69 	if (!ret) {
70 		ctx->lock_held = true;
71 		ctx->lock_held_exclusive = ctx->request_exclusive;
72 	}
73 
74 	return ret;
75 }
76 
77 static int xe_validation_trylock(struct xe_validation_ctx *ctx)
78 {
79 	struct xe_validation_device *val = ctx->val;
80 	bool locked;
81 
82 	if (ctx->request_exclusive)
83 		locked = down_write_trylock(&val->lock);
84 	else
85 		locked = down_read_trylock(&val->lock);
86 
87 	if (locked) {
88 		ctx->lock_held = true;
89 		ctx->lock_held_exclusive = ctx->request_exclusive;
90 	}
91 
92 	return locked ? 0 : -EWOULDBLOCK;
93 }
94 
95 static void xe_validation_unlock(struct xe_validation_ctx *ctx)
96 {
97 	if (!ctx->lock_held)
98 		return;
99 
100 	if (ctx->lock_held_exclusive)
101 		up_write(&ctx->val->lock);
102 	else
103 		up_read(&ctx->val->lock);
104 
105 	ctx->lock_held = false;
106 }
107 
108 /**
109  * xe_validation_ctx_init() - Initialize an xe_validation_ctx
110  * @ctx: The xe_validation_ctx to initialize.
111  * @val: The xe_validation_device representing the validation domain.
112  * @exec: The struct drm_exec to use for the transaction. May be NULL.
113  * @flags: The flags to use for initialization.
114  *
115  * Initialize and lock an xe_validation transaction using the validation domain
116  * represented by @val. Also initialize the drm_exec object forwarding parts of
117  * @flags to the drm_exec initialization. The @flags.exclusive flag should
118  * typically be set to false to avoid locking out other validators from the
119  * domain until an OOM is hit. For testing- or final attempt purposes it can,
120  * however, be set to true.
121  *
122  * Return: %0 on success, %-EINTR if interruptible initial locking failed with a
123  * signal pending. If @flags.no_block is set to true, a failed trylock
124  * returns %-EWOULDBLOCK.
125  */
126 int xe_validation_ctx_init(struct xe_validation_ctx *ctx, struct xe_validation_device *val,
127 			   struct drm_exec *exec, const struct xe_val_flags flags)
128 {
129 	int ret;
130 
131 	ctx->exec = exec;
132 	ctx->val = val;
133 	ctx->lock_held = false;
134 	ctx->lock_held_exclusive = false;
135 	ctx->request_exclusive = flags.exclusive;
136 	ctx->val_flags = flags;
137 	ctx->exec_flags = 0;
138 	ctx->nr = 0;
139 
140 	if (flags.no_block)
141 		ret = xe_validation_trylock(ctx);
142 	else
143 		ret = xe_validation_lock(ctx);
144 	if (ret)
145 		return ret;
146 
147 	if (exec) {
148 		if (flags.interruptible)
149 			ctx->exec_flags |= DRM_EXEC_INTERRUPTIBLE_WAIT;
150 		if (flags.exec_ignore_duplicates)
151 			ctx->exec_flags |= DRM_EXEC_IGNORE_DUPLICATES;
152 		drm_exec_init(exec, ctx->exec_flags, ctx->nr);
153 	}
154 
155 	return 0;
156 }
157 
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
/*
 * This abuses both drm_exec and ww_mutex internals and should be
 * replaced by checking for -EDEADLK when we can make TTM
 * stop converting -EDEADLK to -ENOMEM.
 * An alternative is to not have exhaustive eviction with
 * CONFIG_DEBUG_WW_MUTEX_SLOWPATH until that happens.
 */
static bool xe_validation_contention_injected(struct drm_exec *exec)
{
	/*
	 * A non-NULL contending_lock in the ww acquire ticket indicates
	 * the debug slowpath injected (or hit) ww_mutex contention.
	 */
	return !!exec->ticket.contending_lock;
}

#else

static bool xe_validation_contention_injected(struct drm_exec *exec)
{
	/* Without the ww_mutex debug slowpath, contention is never injected. */
	return false;
}

#endif
179 
180 static bool __xe_validation_should_retry(struct xe_validation_ctx *ctx, int ret)
181 {
182 	if (ret == -ENOMEM &&
183 	    ((ctx->request_exclusive &&
184 	      xe_validation_contention_injected(ctx->exec)) ||
185 	     !ctx->request_exclusive)) {
186 		ctx->request_exclusive = true;
187 		return true;
188 	}
189 
190 	return false;
191 }
192 
193 /**
194  * xe_validation_exec_lock() - Perform drm_gpuvm_exec_lock within a validation
195  * transaction.
196  * @ctx: An uninitialized xe_validation_ctx.
197  * @vm_exec: An initialized struct vm_exec.
198  * @val: The validation domain.
199  *
200  * The drm_gpuvm_exec_lock() function internally initializes its drm_exec
201  * transaction and therefore doesn't lend itself very well to be using
202  * xe_validation_ctx_init(). Provide a helper that takes an uninitialized
203  * xe_validation_ctx and calls drm_gpuvm_exec_lock() with OOM retry.
204  *
205  * Return: %0 on success, negative error code on failure.
206  */
207 int xe_validation_exec_lock(struct xe_validation_ctx *ctx,
208 			    struct drm_gpuvm_exec *vm_exec,
209 			    struct xe_validation_device *val)
210 {
211 	int ret;
212 
213 	memset(ctx, 0, sizeof(*ctx));
214 	ctx->exec = &vm_exec->exec;
215 	ctx->exec_flags = vm_exec->flags;
216 	ctx->val = val;
217 	if (ctx->exec_flags & DRM_EXEC_INTERRUPTIBLE_WAIT)
218 		ctx->val_flags.interruptible = 1;
219 	if (ctx->exec_flags & DRM_EXEC_IGNORE_DUPLICATES)
220 		ctx->val_flags.exec_ignore_duplicates = 1;
221 retry:
222 	ret = xe_validation_lock(ctx);
223 	if (ret)
224 		return ret;
225 
226 	ret = drm_gpuvm_exec_lock(vm_exec);
227 	if (ret) {
228 		xe_validation_unlock(ctx);
229 		if (__xe_validation_should_retry(ctx, ret))
230 			goto retry;
231 	}
232 
233 	return ret;
234 }
235 
236 /**
237  * xe_validation_ctx_fini() - Finalize a validation transaction
238  * @ctx: The Validation transaction to finalize.
239  *
240  * Finalize a validation transaction and its related drm_exec transaction.
241  */
242 void xe_validation_ctx_fini(struct xe_validation_ctx *ctx)
243 {
244 	if (ctx->exec)
245 		drm_exec_fini(ctx->exec);
246 	xe_validation_unlock(ctx);
247 }
248 
249 /**
250  * xe_validation_should_retry() - Determine if a validation transaction should retry
251  * @ctx: The validation transaction.
252  * @ret: Pointer to a return value variable.
253  *
254  * Determines whether a validation transaction should retry based on the
255  * internal transaction state and the return value pointed to by @ret.
256  * If a validation should be retried, the transaction is prepared for that,
257  * and the validation lock might be re-locked in exclusive mode, and *@ret
258  * is set to %0. If the re-locking errors, typically due to interruptible
259  * locking with signal pending, *@ret is instead set to -EINTR and the
260  * function returns %false.
261  *
262  * Return: %true if validation should be retried, %false otherwise.
263  */
264 bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret)
265 {
266 	if (__xe_validation_should_retry(ctx, *ret)) {
267 		drm_exec_fini(ctx->exec);
268 		*ret = 0;
269 		if (ctx->request_exclusive != ctx->lock_held_exclusive) {
270 			xe_validation_unlock(ctx);
271 			*ret = xe_validation_lock(ctx);
272 		}
273 		drm_exec_init(ctx->exec, ctx->exec_flags, ctx->nr);
274 		return !*ret;
275 	}
276 
277 	return false;
278 }
279