xref: /linux/drivers/gpu/drm/i915/gem/i915_gem_context.h (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include "i915_gem_context_types.h"

#include "gt/intel_context.h"

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"

struct drm_device;
struct drm_file;

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	set_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

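/*
 * Illustrative sketch (not part of the original header): the UCONTEXT_*
 * helpers above are the accessors a context-setparam style path would use
 * to latch a user's request onto ctx->user_flags, for example:
 *
 *	if (value)
 *		i915_gem_context_set_bannable(ctx);
 *	else
 *		i915_gem_context_clear_bannable(ctx);
 *
 * set_bit()/clear_bit() are atomic per flag, but a caller that needs a
 * consistent view across several flags must serialise externally.
 */
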
static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline bool
i915_gem_context_uses_protected_content(const struct i915_gem_context *ctx)
{
	return ctx->uses_protected_content;
}

/* i915_gem_context.c */
void i915_gem_init__contexts(struct drm_i915_private *i915);

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

void i915_gem_context_release(struct kref *ctx_ref);

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id);

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}

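/*
 * Illustrative sketch (not part of the original header): a reference taken
 * with i915_gem_context_get() must be balanced by i915_gem_context_put(),
 * which invokes i915_gem_context_release() when the last reference drops.
 * For example, pinning a context while some work is performed:
 *
 *	struct i915_gem_context *ctx;
 *
 *	ctx = i915_gem_context_get(src);	(src already holds a reference)
 *	do_work_on_context(ctx);		(hypothetical helper)
 *	i915_gem_context_put(ctx);
 */
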
/*
 * The context's private address space, if any (NULL without full ppGTT);
 * the caller must hold ctx->mutex, as checked by lockdep.
 */
static inline struct i915_address_space *
i915_gem_context_vm(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
}

static inline bool i915_gem_context_has_full_ppgtt(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!!ctx->vm != HAS_FULL_PPGTT(ctx->i915));

	return !!ctx->vm;
}

/*
 * Return a reference to the address space used for execbuf: the context's
 * private ppGTT if it has one, otherwise the global GTT. The caller must
 * release the reference with i915_vm_put().
 */
static inline struct i915_address_space *
i915_gem_context_get_eb_vm(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	vm = ctx->vm;
	if (!vm)
		vm = &ctx->i915->ggtt.vm;
	vm = i915_vm_get(vm);

	return vm;
}

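/*
 * Illustrative sketch (not part of the original header): a caller that
 * needs the VM only briefly pairs the getter with i915_vm_put() once it
 * is done:
 *
 *	struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx);
 *
 *	reserve_range_in_vm(vm);	(hypothetical helper)
 *	i915_vm_put(vm);
 */
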
static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines,
					 lockdep_is_held(&ctx->engines_mutex));
}

static inline struct i915_gem_engines *
i915_gem_context_lock_engines(struct i915_gem_context *ctx)
	__acquires(&ctx->engines_mutex)
{
	mutex_lock(&ctx->engines_mutex);
	return i915_gem_context_engines(ctx);
}

static inline void
i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
	__releases(&ctx->engines_mutex)
{
	mutex_unlock(&ctx->engines_mutex);
}

static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
	struct intel_context *ce;

	rcu_read_lock(); {
		struct i915_gem_engines *e = rcu_dereference(ctx->engines);
		if (unlikely(!e)) /* context was closed! */
			ce = ERR_PTR(-ENOENT);
		else if (likely(idx < e->num_engines && e->engines[idx]))
			ce = intel_context_get(e->engines[idx]);
		else
			ce = ERR_PTR(-EINVAL);
	} rcu_read_unlock();

	return ce;
}

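/*
 * Illustrative sketch (not part of the original header): the returned
 * intel_context carries its own reference and must be released with
 * intel_context_put(); ERR_PTR(-ENOENT) means the context has already
 * been closed, ERR_PTR(-EINVAL) an out-of-range or unpopulated slot:
 *
 *	struct intel_context *ce = i915_gem_context_get_engine(ctx, idx);
 *
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	touch_engine_context(ce);	(hypothetical per-engine operation)
 *	intel_context_put(ce);
 */
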
static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
			   struct i915_gem_engines *engines)
{
	it->engines = engines;
	it->idx = 0;
}

struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);

#define for_each_gem_engine(ce, engines, it) \
	for (i915_gem_engines_iter_init(&(it), (engines)); \
	     ((ce) = i915_gem_engines_iter_next(&(it)));)

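/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * is to pin the engine map with i915_gem_context_lock_engines(), walk it
 * with for_each_gem_engine(), and then drop the lock:
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
 *		probe_engine(ce);	(hypothetical per-engine operation)
 *	i915_gem_context_unlock_engines(ctx);
 */
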
void i915_gem_context_module_exit(void);
int i915_gem_context_module_init(void);

struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);

int i915_gem_user_to_context_sseu(struct intel_gt *gt,
				  const struct drm_i915_gem_context_param_sseu *user,
				  struct intel_sseu *context);

#endif /* !__I915_GEM_CONTEXT_H__ */