/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include "i915_gem_context_types.h"

#include "gt/intel_context.h"

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"

struct drm_device;
struct drm_file;

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	set_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
{
	return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
{
	set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
{
	clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
{
	set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
{
	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

/* i915_gem_context.c */
void i915_gem_init__contexts(struct drm_i915_private *i915);
void i915_gem_driver_release__contexts(struct drm_i915_private *i915);

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

void i915_gem_context_release(struct kref *ctx_ref);

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}

static inline struct i915_address_space *
i915_gem_context_vm(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
}

static inline struct i915_address_space *
i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);
	if (!vm)
		vm = &ctx->i915->ggtt.vm;
	vm = i915_vm_get(vm);
	rcu_read_unlock();

	return vm;
}

static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines,
					 lockdep_is_held(&ctx->engines_mutex));
}

static inline struct i915_gem_engines *
i915_gem_context_lock_engines(struct i915_gem_context *ctx)
	__acquires(&ctx->engines_mutex)
{
	mutex_lock(&ctx->engines_mutex);
	return i915_gem_context_engines(ctx);
}

static inline void
i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
	__releases(&ctx->engines_mutex)
{
	mutex_unlock(&ctx->engines_mutex);
}

static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
	struct intel_context *ce;

	rcu_read_lock(); {
		struct i915_gem_engines *e = rcu_dereference(ctx->engines);
		if (unlikely(!e)) /* context was closed! */
			ce = ERR_PTR(-ENOENT);
		else if (likely(idx < e->num_engines && e->engines[idx]))
			ce = intel_context_get(e->engines[idx]);
		else
			ce = ERR_PTR(-EINVAL);
	} rcu_read_unlock();

	return ce;
}

static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
			   struct i915_gem_engines *engines)
{
	it->engines = engines;
	it->idx = 0;
}

struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);

#define for_each_gem_engine(ce, engines, it) \
	for (i915_gem_engines_iter_init(&(it), (engines)); \
	     ((ce) = i915_gem_engines_iter_next(&(it)));)
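
/*
 * Example (illustrative sketch only, not code taken from the driver): walking
 * every engine bound to a context under engines_mutex by combining the
 * lock/unlock helpers above with for_each_gem_engine(). "my_inspect_engine"
 * is a hypothetical stand-in for whatever the caller does with each
 * intel_context; the caller is assumed to already hold a reference on ctx.
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
 *		my_inspect_engine(ce);
 *	i915_gem_context_unlock_engines(ctx);
 */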

struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);

int i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
				  const struct drm_i915_gem_context_param_sseu *user,
				  struct intel_sseu *context);

#endif /* !__I915_GEM_CONTEXT_H__ */