/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>

#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_trace.h"

static struct i915_global_object {
	struct i915_global base;
	struct kmem_cache *slab_objects;
} global;

struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	return kmem_cache_free(global.slab_objects, obj);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key)
{
	__mutex_init(&obj->mm.lock, ops->name ?: "obj->mm.lock", key);

	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->mm.link);

	INIT_LIST_HEAD(&obj->lut_list);

	spin_lock_init(&obj->mmo.lock);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

	obj->ops = ops;

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);

	if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj))
		i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev),
					       &obj->mm.lock);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 *					 for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	obj->cache_level = cache_level;

	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(to_i915(obj->base.dev)))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}

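/*
 * Illustrative sketch (not part of the driver): how a backing-store
 * implementation might wire the helpers above together when creating an
 * object. The ops table, lock class, and error handling below are
 * assumptions for the example only.
 *
 *	static struct lock_class_key lock_class;
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.name = "example",
 *	};
 *
 *	obj = i915_gem_object_alloc();
 *	if (!obj)
 *		return ERR_PTR(-ENOMEM);
 *
 *	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 *	i915_gem_object_init(obj, &example_ops, &lock_class);
 *	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 */
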
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	LIST_HEAD(close);

	i915_gem_object_lock(obj);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;

		if (ctx->file_priv != fpriv)
			continue;

		i915_gem_context_get(ctx);
		list_move(&lut->obj_link, &close);
	}
	i915_gem_object_unlock(obj);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		/*
		 * We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */

		mutex_lock(&ctx->mutex);
		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		if (vma) {
			GEM_BUG_ON(vma->obj != obj);
			GEM_BUG_ON(!atomic_read(&vma->open_count));
			i915_vma_close(vma);
		}
		mutex_unlock(&ctx->mutex);

		i915_gem_context_put(lut->ctx);
		i915_lut_handle_free(lut);
		i915_gem_object_put(obj);
	}
}

static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	dma_resv_fini(&obj->base._resv);
	i915_gem_object_free(obj);

	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
	atomic_dec(&i915->mm.free_count);
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	llist_for_each_entry_safe(obj, on, freed, freed) {
		struct i915_mmap_offset *mmo, *mn;

		trace_i915_gem_object_destroy(obj);

		if (!list_empty(&obj->vma.list)) {
			struct i915_vma *vma;

			/*
			 * Note that the vma keeps an object reference while
			 * it is active, so it *should* not sleep while we
			 * destroy it. Our debug code, however, insists that
			 * it *might*. For the moment, play along.
			 */
			spin_lock(&obj->vma.lock);
			while ((vma = list_first_entry_or_null(&obj->vma.list,
							       struct i915_vma,
							       obj_link))) {
				GEM_BUG_ON(vma->obj != obj);
				spin_unlock(&obj->vma.lock);

				__i915_vma_put(vma);

				spin_lock(&obj->vma.lock);
			}
			spin_unlock(&obj->vma.lock);
		}

		i915_gem_object_release_mmap(obj);

		rbtree_postorder_for_each_entry_safe(mmo, mn,
						     &obj->mmo.offsets,
						     offset) {
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
		}
		obj->mmo.offsets = RB_ROOT;

		GEM_BUG_ON(obj->userfault_count);
		GEM_BUG_ON(!list_empty(&obj->lut_list));

		atomic_set(&obj->mm.pages_pin_count, 0);
		__i915_gem_object_put_pages(obj);
		GEM_BUG_ON(i915_gem_object_has_pages(obj));
		bitmap_free(obj->bit_17);

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

		drm_gem_free_mmap_offset(&obj->base);

		if (obj->ops->release)
			obj->ops->release(obj);

		/* But keep the pointer alive for RCU-protected lookups */
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
		cond_resched();
	}
}

void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->mm.free_list);

	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);

	i915_gem_flush_free_objects(i915);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	atomic_inc(&i915->mm.free_count);

	/*
	 * This serializes freeing with the shrinker. Since the free
	 * is delayed, first by RCU then by the workqueue, we want the
	 * shrinker to be able to free pages of unreferenced objects,
	 * or else we may oom whilst there are plenty of deferred
	 * freed objects.
	 */
	i915_gem_object_make_unshrinkable(obj);

	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we cannot do that directly from the RCU callback (which may
	 * be a softirq context), but must instead defer that work onto a
	 * kthread. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_work(i915->wq, &i915->mm.free_work);
}

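/*
 * Summary sketch of the deferred-free flow implemented above. The call
 * sites outside this file are assumptions for illustration, not an
 * exhaustive list.
 *
 *	i915_gem_free_object()                    last GEM reference dropped
 *	  llist_add(&obj->freed, &mm.free_list)   push onto the lockless list
 *	  queue_work(i915->wq, &mm.free_work)     kick __i915_gem_free_work()
 *
 *	i915_gem_flush_free_objects()             also callable directly from
 *	  llist_del_all(&mm.free_list)            allocation paths to throttle
 *	  __i915_gem_free_objects()               unbind vma, release pages,
 *	    call_rcu(__i915_gem_free_object_rcu)  then free the object after
 *						  the RCU grace period
 */
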
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains)
{
	struct i915_vma *vma;

	assert_object_held(obj);

	if (!(obj->write_domain & flush_domains))
		return;

	switch (obj->write_domain) {
	case I915_GEM_DOMAIN_GTT:
		spin_lock(&obj->vma.lock);
		for_each_ggtt_vma(vma, obj) {
			if (i915_vma_unset_ggtt_write(vma))
				intel_gt_flush_ggtt_writes(vma->vm->gt);
		}
		spin_unlock(&obj->vma.lock);

		i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
		break;

	case I915_GEM_DOMAIN_WC:
		wmb();
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
	}

	obj->write_domain = 0;
}

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_flush(front, origin);
		intel_frontbuffer_put(front);
	}
}

void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_invalidate(front, origin);
		intel_frontbuffer_put(front);
	}
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

static void i915_global_objects_shrink(void)
{
	kmem_cache_shrink(global.slab_objects);
}

static void i915_global_objects_exit(void)
{
	kmem_cache_destroy(global.slab_objects);
}

static struct i915_global_object global = { {
	.shrink = i915_global_objects_shrink,
	.exit = i915_global_objects_exit,
} };

int __init i915_global_objects_init(void)
{
	global.slab_objects =
		KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!global.slab_objects)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif