/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU enters
 * and exits RC6 (the GPU has its own internal power context, except on gen5).
 * Though something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current to invoke a save of the context we actually care about. In
 * fact, the code could likely be constructed, albeit in a more complicated
 * fashion, to never use the default context, though that limits the driver's
 * ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context, but it is still active.
 *
 */
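/*
 * For orientation only: a minimal userspace sketch of the create/use/destroy
 * flow described above, assuming an open i915 DRM fd and libdrm's drmIoctl()
 * wrapper. Structure and ioctl names come from include/uapi/drm/i915_drm.h;
 * error handling is omitted.
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	... execbufs then name this context in their rsvd1 field, e.g. via
 *	    i915_execbuffer2_set_context_id(execbuf, create.ctx_id) ...
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */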

#include <linux/log2.h>
#include <linux/nospec.h>

#include <drm/i915_drm.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_ring.h"

#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1

static struct i915_global_gem_context {
	struct i915_global base;
	struct kmem_cache *slab_luts;
} global;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	return kmem_cache_free(global.slab_luts, lut);
}

static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	lockdep_assert_held(&ctx->mutex);

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		rcu_read_unlock();
		i915_gem_object_lock(obj);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		i915_gem_object_unlock(obj);
		rcu_read_lock();

		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			if (atomic_dec_and_test(&vma->open_count) &&
			    !i915_vma_is_ggtt(vma))
				i915_vma_close(vma);
			i915_gem_object_put(obj);
		}

		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
}

static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}
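/*
 * A short illustration of the two addressing modes handled above (a sketch,
 * not uAPI documentation): before the user installs an engine map, engines
 * are addressed by { class, instance }, e.g.
 *
 *	struct i915_engine_class_instance ci = {
 *		.engine_class = I915_ENGINE_CLASS_RENDER,
 *		.engine_instance = 0,
 *	};
 *
 * whereas once I915_CONTEXT_PARAM_ENGINES has been set, ci.engine_instance is
 * instead interpreted as an index into that map (LOOKUP_USER_INDEX).
 */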
static struct i915_address_space *
context_get_vm_rcu(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!rcu_access_pointer(ctx->vm));

	do {
		struct i915_address_space *vm;

		/*
		 * We do not allow downgrading from full-ppgtt [to a shared
		 * global gtt], so ctx->vm cannot become NULL.
		 */
		vm = rcu_dereference(ctx->vm);
		if (!kref_get_unless_zero(&vm->ref))
			continue;

		/*
		 * This ppgtt may have been reallocated between
		 * the read and the kref, and reassigned to a third
		 * context. In order to avoid inadvertent sharing
		 * of this ppgtt with that third context (and not
		 * src), we have to confirm that we have the same
		 * ppgtt after passing through the strong memory
		 * barrier implied by a successful
		 * kref_get_unless_zero().
		 *
		 * Once we have acquired the current ppgtt of ctx,
		 * we no longer care if it is released from ctx, as
		 * it cannot be reallocated elsewhere.
		 */

		if (vm == rcu_access_pointer(ctx->vm))
			return rcu_pointer_handoff(vm);

		i915_vm_put(vm);
	} while (1);
}

static void intel_context_set_gem(struct intel_context *ce,
				  struct i915_gem_context *ctx)
{
	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
		ce->ring = __intel_context_ring_size(SZ_16K);

	if (rcu_access_pointer(ctx->vm)) {
		struct i915_address_space *vm;

		rcu_read_lock();
		vm = context_get_vm_rcu(ctx); /* hmm */
		rcu_read_unlock();

		i915_vm_put(ce->vm);
		ce->vm = vm;
	}

	GEM_BUG_ON(ce->timeline);
	if (ctx->timeline)
		ce->timeline = intel_timeline_get(ctx->timeline);

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_semaphores(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		if (!e->engines[count])
			continue;

		RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
		intel_context_put(e->engines[count]);
	}
	kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}

static void free_engines_rcu(struct rcu_head *rcu)
{
	free_engines(container_of(rcu, struct i915_gem_engines, rcu));
}

static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
	const struct intel_gt *gt = &ctx->i915->gt;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e;
	enum intel_engine_id id;

	e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
	if (!e)
		return ERR_PTR(-ENOMEM);

	e->ctx = ctx;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			__free_engines(e, e->num_engines + 1);
			return ERR_CAST(ce);
		}

		intel_context_set_gem(ce, ctx);

		e->engines[engine->legacy_idx] = ce;
		e->num_engines = max(e->num_engines, engine->legacy_idx);
	}
	e->num_engines++;

	return e;
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	free_engines(rcu_access_pointer(ctx->engines));
	mutex_destroy(&ctx->engines_mutex);

	if (ctx->timeline)
		intel_timeline_put(ctx->timeline);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);

	kfree_rcu(ctx, rcu);
}

static void contexts_free_all(struct llist_node *list)
{
	struct i915_gem_context *ctx, *cn;

	llist_for_each_entry_safe(ctx, cn, list, free_link)
		i915_gem_context_free(ctx);
}

static void contexts_flush_free(struct i915_gem_contexts *gc)
{
	contexts_free_all(llist_del_all(&gc->free_list));
}

static void contexts_free_worker(struct work_struct *work)
{
	struct i915_gem_contexts *gc =
		container_of(work, typeof(*gc), free_work);

	contexts_flush_free(gc);
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;

	trace_i915_context_free(ctx);
	if (llist_add(&ctx->free_link, &gc->free_list))
		schedule_work(&gc->free_work);
}
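/*
 * Note that i915_gem_context_release() above only queues the context onto a
 * lockless list and kicks a worker; the heavyweight teardown in
 * i915_gem_context_free() then runs from contexts_free_worker() in process
 * context, where taking gem.contexts.lock and the context mutexes is safe
 * regardless of where the final put was made.
 */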
static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines, true);
}

static bool __reset_engine(struct intel_engine_cs *engine)
{
	struct intel_gt *gt = engine->gt;
	bool success = false;

	if (!intel_has_reset_engine(gt))
		return false;

	if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
			      &gt->reset.flags)) {
		success = intel_engine_reset(engine, NULL) == 0;
		clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
				      &gt->reset.flags);
	}

	return success;
}

static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}

static bool __cancel_engine(struct intel_engine_cs *engine)
{
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset). As we have marked our context
	 * as banned, any incomplete request, including any running, will
	 * be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fall back to doing a local reset
	 * instead.
	 */
	if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
	    !intel_engine_pulse(engine))
		return true;

	/* If we are unable to send a pulse, try resetting this engine. */
	return __reset_engine(engine);
}
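/*
 * __active_engine() below has to cope with rq->engine being updated under it
 * (for example when a request migrates between the siblings of a virtual
 * engine), so it keeps re-reading rq->engine and chasing the corresponding
 * active.lock until the engine it has locked is the engine the request is on.
 */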
static struct intel_engine_cs *__active_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine, *locked;

	/*
	 * Serialise with __i915_request_submit() so that it sees
	 * is-banned?, or we know the request is already inflight.
	 */
	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->active.lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->active.lock);
		spin_lock(&engine->active.lock);
		locked = engine;
	}

	engine = NULL;
	if (i915_request_is_active(rq) && rq->fence.error != -EIO)
		engine = rq->engine;

	spin_unlock_irq(&locked->active.lock);

	return engine;
}

static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	if (!ce->timeline)
		return NULL;

	mutex_lock(&ce->timeline->mutex);
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		if (i915_request_completed(rq))
			break;

		/* Check with the backend if the request is inflight */
		engine = __active_engine(rq);
		if (engine)
			break;
	}
	mutex_unlock(&ce->timeline->mutex);

	return engine;
}

static void kill_engines(struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
	for_each_gem_engine(ce, engines, it) {
		struct intel_engine_cs *engine;

		if (intel_context_set_banned(ce))
			continue;

		/*
		 * Check the current active state of this context; if we
		 * are currently executing on the GPU we need to evict
		 * ourselves. On the other hand, if we haven't yet been
		 * submitted to the GPU or if everything is complete,
		 * we have nothing to do.
		 */
		engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
		if (engine && !__cancel_engine(engine))
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a full
			 * reset. We hope the collateral damage is worth it.
			 */
			__reset_context(engines->ctx, engine);
	}
}
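/*
 * Context teardown, in short: context_close() marks the context closed and,
 * for non-persistent contexts (or when hangcheck is disabled), calls
 * kill_context(), which bans and evicts both the current engine set and any
 * stale sets still tracked on ctx->stale.engines below.
 */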
static void kill_stale_engines(struct i915_gem_context *ctx)
{
	struct i915_gem_engines *pos, *next;
	unsigned long flags;

	spin_lock_irqsave(&ctx->stale.lock, flags);
	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
		if (!i915_sw_fence_await(&pos->fence))
			continue;

		spin_unlock_irqrestore(&ctx->stale.lock, flags);

		kill_engines(pos);

		spin_lock_irqsave(&ctx->stale.lock, flags);
		list_safe_reset_next(pos, next, link);
		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */

		i915_sw_fence_complete(&pos->fence);
	}
	spin_unlock_irqrestore(&ctx->stale.lock, flags);
}

static void kill_context(struct i915_gem_context *ctx)
{
	kill_stale_engines(ctx);
	kill_engines(__context_engines_static(ctx));
}

static void set_closed_name(struct i915_gem_context *ctx)
{
	char *s;

	/* Replace '[]' with '<>' to indicate closed in debug prints */

	s = strrchr(ctx->name, '[');
	if (!s)
		return;

	*s = '<';

	s = strchr(s + 1, ']');
	if (s)
		*s = '>';
}

static void context_close(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	i915_gem_context_set_closed(ctx);
	set_closed_name(ctx);

	mutex_lock(&ctx->mutex);

	vm = i915_gem_context_vm(ctx);
	if (vm)
		i915_vm_close(vm);

	ctx->file_priv = ERR_PTR(-EBADF);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);

	mutex_unlock(&ctx->mutex);

	/*
	 * If the user has disabled hangchecking, we cannot be sure that
	 * the batches will ever complete after the context is closed,
	 * keeping the context and all resources pinned forever. So in this
	 * case we opt to forcibly kill off all remaining requests on
	 * context close.
	 */
	if (!i915_gem_context_is_persistent(ctx) ||
	    !i915_modparams.enable_hangcheck)
		kill_context(ctx);

	i915_gem_context_put(ctx);
}

static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
{
	if (i915_gem_context_is_persistent(ctx) == state)
		return 0;

	if (state) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!i915_modparams.enable_hangcheck)
			return -EINVAL;

		i915_gem_context_set_persistence(ctx);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(&ctx->i915->gt))
			return -ENODEV;

		i915_gem_context_clear_persistence(ctx);
	}

	return 0;
}
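/*
 * For illustration (a sketch, not uAPI documentation): userspace opts out of
 * persistence with the SETPARAM ioctl, e.g.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PERSISTENCE,
 *		.value = 0,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * which reaches __context_set_persistence() via ctx_setparam(); with
 * .value = 0 the request is rejected unless the scheduler supports preemption
 * and per-engine reset is available, as checked above.
 */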
static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	int err;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->i915 = i915;
	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
	mutex_init(&ctx->mutex);

	spin_lock_init(&ctx->stale.lock);
	INIT_LIST_HEAD(&ctx->stale.engines);

	mutex_init(&ctx->engines_mutex);
	e = default_engines(ctx);
	if (IS_ERR(e)) {
		err = PTR_ERR(e);
		goto err_free;
	}
	RCU_INIT_POINTER(ctx->engines, e);

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(i915);

	i915_gem_context_set_bannable(ctx);
	i915_gem_context_set_recoverable(ctx);
	__context_set_persistence(ctx, true /* cgroup hook? */);

	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

	spin_lock(&i915->gem.contexts.lock);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	spin_unlock(&i915->gem.contexts.lock);

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(err);
}

static void
context_apply_all(struct i915_gem_context *ctx,
		  void (*fn)(struct intel_context *ce, void *data),
		  void *data)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
		fn(ce, data);
	i915_gem_context_unlock_engines(ctx);
}

static void __apply_ppgtt(struct intel_context *ce, void *vm)
{
	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(vm);
}

static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
	struct i915_address_space *old = i915_gem_context_vm(ctx);

	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));

	rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
	context_apply_all(ctx, __apply_ppgtt, vm);

	return old;
}

static void __assign_ppgtt(struct i915_gem_context *ctx,
			   struct i915_address_space *vm)
{
	if (vm == rcu_access_pointer(ctx->vm))
		return;

	vm = __set_ppgtt(ctx, vm);
	if (vm)
		i915_vm_close(vm);
}

static void __set_timeline(struct intel_timeline **dst,
			   struct intel_timeline *src)
{
	struct intel_timeline *old = *dst;

	*dst = src ?
intel_timeline_get(src) : NULL; 720 721 if (old) 722 intel_timeline_put(old); 723 } 724 725 static void __apply_timeline(struct intel_context *ce, void *timeline) 726 { 727 __set_timeline(&ce->timeline, timeline); 728 } 729 730 static void __assign_timeline(struct i915_gem_context *ctx, 731 struct intel_timeline *timeline) 732 { 733 __set_timeline(&ctx->timeline, timeline); 734 context_apply_all(ctx, __apply_timeline, timeline); 735 } 736 737 static struct i915_gem_context * 738 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) 739 { 740 struct i915_gem_context *ctx; 741 742 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && 743 !HAS_EXECLISTS(i915)) 744 return ERR_PTR(-EINVAL); 745 746 /* Reap the stale contexts */ 747 contexts_flush_free(&i915->gem.contexts); 748 749 ctx = __create_context(i915); 750 if (IS_ERR(ctx)) 751 return ctx; 752 753 if (HAS_FULL_PPGTT(i915)) { 754 struct i915_ppgtt *ppgtt; 755 756 ppgtt = i915_ppgtt_create(&i915->gt); 757 if (IS_ERR(ppgtt)) { 758 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", 759 PTR_ERR(ppgtt)); 760 context_close(ctx); 761 return ERR_CAST(ppgtt); 762 } 763 764 mutex_lock(&ctx->mutex); 765 __assign_ppgtt(ctx, &ppgtt->vm); 766 mutex_unlock(&ctx->mutex); 767 768 i915_vm_put(&ppgtt->vm); 769 } 770 771 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { 772 struct intel_timeline *timeline; 773 774 timeline = intel_timeline_create(&i915->gt, NULL); 775 if (IS_ERR(timeline)) { 776 context_close(ctx); 777 return ERR_CAST(timeline); 778 } 779 780 __assign_timeline(ctx, timeline); 781 intel_timeline_put(timeline); 782 } 783 784 trace_i915_context_create(ctx); 785 786 return ctx; 787 } 788 789 static void init_contexts(struct i915_gem_contexts *gc) 790 { 791 spin_lock_init(&gc->lock); 792 INIT_LIST_HEAD(&gc->list); 793 794 INIT_WORK(&gc->free_work, contexts_free_worker); 795 init_llist_head(&gc->free_list); 796 } 797 798 void i915_gem_init__contexts(struct drm_i915_private *i915) 799 { 800 init_contexts(&i915->gem.contexts); 801 drm_dbg(&i915->drm, "%s context support initialized\n", 802 DRIVER_CAPS(i915)->has_logical_contexts ? 
803 "logical" : "fake"); 804 } 805 806 void i915_gem_driver_release__contexts(struct drm_i915_private *i915) 807 { 808 flush_work(&i915->gem.contexts.free_work); 809 } 810 811 static int gem_context_register(struct i915_gem_context *ctx, 812 struct drm_i915_file_private *fpriv, 813 u32 *id) 814 { 815 struct i915_address_space *vm; 816 int ret; 817 818 ctx->file_priv = fpriv; 819 820 mutex_lock(&ctx->mutex); 821 vm = i915_gem_context_vm(ctx); 822 if (vm) 823 WRITE_ONCE(vm->file, fpriv); /* XXX */ 824 mutex_unlock(&ctx->mutex); 825 826 ctx->pid = get_task_pid(current, PIDTYPE_PID); 827 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 828 current->comm, pid_nr(ctx->pid)); 829 830 /* And finally expose ourselves to userspace via the idr */ 831 ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL); 832 if (ret) 833 put_pid(fetch_and_zero(&ctx->pid)); 834 835 return ret; 836 } 837 838 int i915_gem_context_open(struct drm_i915_private *i915, 839 struct drm_file *file) 840 { 841 struct drm_i915_file_private *file_priv = file->driver_priv; 842 struct i915_gem_context *ctx; 843 int err; 844 u32 id; 845 846 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); 847 848 /* 0 reserved for invalid/unassigned ppgtt */ 849 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); 850 851 ctx = i915_gem_create_context(i915, 0); 852 if (IS_ERR(ctx)) { 853 err = PTR_ERR(ctx); 854 goto err; 855 } 856 857 err = gem_context_register(ctx, file_priv, &id); 858 if (err < 0) 859 goto err_ctx; 860 861 GEM_BUG_ON(id); 862 return 0; 863 864 err_ctx: 865 context_close(ctx); 866 err: 867 xa_destroy(&file_priv->vm_xa); 868 xa_destroy(&file_priv->context_xa); 869 return err; 870 } 871 872 void i915_gem_context_close(struct drm_file *file) 873 { 874 struct drm_i915_file_private *file_priv = file->driver_priv; 875 struct drm_i915_private *i915 = file_priv->dev_priv; 876 struct i915_address_space *vm; 877 struct i915_gem_context *ctx; 878 unsigned long idx; 879 880 xa_for_each(&file_priv->context_xa, idx, ctx) 881 context_close(ctx); 882 xa_destroy(&file_priv->context_xa); 883 884 xa_for_each(&file_priv->vm_xa, idx, vm) 885 i915_vm_put(vm); 886 xa_destroy(&file_priv->vm_xa); 887 888 contexts_flush_free(&i915->gem.contexts); 889 } 890 891 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, 892 struct drm_file *file) 893 { 894 struct drm_i915_private *i915 = to_i915(dev); 895 struct drm_i915_gem_vm_control *args = data; 896 struct drm_i915_file_private *file_priv = file->driver_priv; 897 struct i915_ppgtt *ppgtt; 898 u32 id; 899 int err; 900 901 if (!HAS_FULL_PPGTT(i915)) 902 return -ENODEV; 903 904 if (args->flags) 905 return -EINVAL; 906 907 ppgtt = i915_ppgtt_create(&i915->gt); 908 if (IS_ERR(ppgtt)) 909 return PTR_ERR(ppgtt); 910 911 ppgtt->vm.file = file_priv; 912 913 if (args->extensions) { 914 err = i915_user_extensions(u64_to_user_ptr(args->extensions), 915 NULL, 0, 916 ppgtt); 917 if (err) 918 goto err_put; 919 } 920 921 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, 922 xa_limit_32b, GFP_KERNEL); 923 if (err) 924 goto err_put; 925 926 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 927 args->vm_id = id; 928 return 0; 929 930 err_put: 931 i915_vm_put(&ppgtt->vm); 932 return err; 933 } 934 935 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, 936 struct drm_file *file) 937 { 938 struct drm_i915_file_private *file_priv = file->driver_priv; 939 struct drm_i915_gem_vm_control *args = data; 940 struct i915_address_space *vm; 941 942 if (args->flags) 943 return 
-EINVAL; 944 945 if (args->extensions) 946 return -EINVAL; 947 948 vm = xa_erase(&file_priv->vm_xa, args->vm_id); 949 if (!vm) 950 return -ENOENT; 951 952 i915_vm_put(vm); 953 return 0; 954 } 955 956 struct context_barrier_task { 957 struct i915_active base; 958 void (*task)(void *data); 959 void *data; 960 }; 961 962 __i915_active_call 963 static void cb_retire(struct i915_active *base) 964 { 965 struct context_barrier_task *cb = container_of(base, typeof(*cb), base); 966 967 if (cb->task) 968 cb->task(cb->data); 969 970 i915_active_fini(&cb->base); 971 kfree(cb); 972 } 973 974 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); 975 static int context_barrier_task(struct i915_gem_context *ctx, 976 intel_engine_mask_t engines, 977 bool (*skip)(struct intel_context *ce, void *data), 978 int (*emit)(struct i915_request *rq, void *data), 979 void (*task)(void *data), 980 void *data) 981 { 982 struct context_barrier_task *cb; 983 struct i915_gem_engines_iter it; 984 struct intel_context *ce; 985 int err = 0; 986 987 GEM_BUG_ON(!task); 988 989 cb = kmalloc(sizeof(*cb), GFP_KERNEL); 990 if (!cb) 991 return -ENOMEM; 992 993 i915_active_init(&cb->base, NULL, cb_retire); 994 err = i915_active_acquire(&cb->base); 995 if (err) { 996 kfree(cb); 997 return err; 998 } 999 1000 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 1001 struct i915_request *rq; 1002 1003 if (I915_SELFTEST_ONLY(context_barrier_inject_fault & 1004 ce->engine->mask)) { 1005 err = -ENXIO; 1006 break; 1007 } 1008 1009 if (!(ce->engine->mask & engines)) 1010 continue; 1011 1012 if (skip && skip(ce, data)) 1013 continue; 1014 1015 rq = intel_context_create_request(ce); 1016 if (IS_ERR(rq)) { 1017 err = PTR_ERR(rq); 1018 break; 1019 } 1020 1021 err = 0; 1022 if (emit) 1023 err = emit(rq, data); 1024 if (err == 0) 1025 err = i915_active_add_request(&cb->base, rq); 1026 1027 i915_request_add(rq); 1028 if (err) 1029 break; 1030 } 1031 i915_gem_context_unlock_engines(ctx); 1032 1033 cb->task = err ? 
NULL : task; /* caller needs to unwind instead */ 1034 cb->data = data; 1035 1036 i915_active_release(&cb->base); 1037 1038 return err; 1039 } 1040 1041 static int get_ppgtt(struct drm_i915_file_private *file_priv, 1042 struct i915_gem_context *ctx, 1043 struct drm_i915_gem_context_param *args) 1044 { 1045 struct i915_address_space *vm; 1046 int err; 1047 u32 id; 1048 1049 if (!rcu_access_pointer(ctx->vm)) 1050 return -ENODEV; 1051 1052 rcu_read_lock(); 1053 vm = context_get_vm_rcu(ctx); 1054 rcu_read_unlock(); 1055 if (!vm) 1056 return -ENODEV; 1057 1058 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); 1059 if (err) 1060 goto err_put; 1061 1062 i915_vm_open(vm); 1063 1064 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 1065 args->value = id; 1066 args->size = 0; 1067 1068 err_put: 1069 i915_vm_put(vm); 1070 return err; 1071 } 1072 1073 static void set_ppgtt_barrier(void *data) 1074 { 1075 struct i915_address_space *old = data; 1076 1077 if (INTEL_GEN(old->i915) < 8) 1078 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); 1079 1080 i915_vm_close(old); 1081 } 1082 1083 static int emit_ppgtt_update(struct i915_request *rq, void *data) 1084 { 1085 struct i915_address_space *vm = rq->context->vm; 1086 struct intel_engine_cs *engine = rq->engine; 1087 u32 base = engine->mmio_base; 1088 u32 *cs; 1089 int i; 1090 1091 if (i915_vm_is_4lvl(vm)) { 1092 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1093 const dma_addr_t pd_daddr = px_dma(ppgtt->pd); 1094 1095 cs = intel_ring_begin(rq, 6); 1096 if (IS_ERR(cs)) 1097 return PTR_ERR(cs); 1098 1099 *cs++ = MI_LOAD_REGISTER_IMM(2); 1100 1101 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); 1102 *cs++ = upper_32_bits(pd_daddr); 1103 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); 1104 *cs++ = lower_32_bits(pd_daddr); 1105 1106 *cs++ = MI_NOOP; 1107 intel_ring_advance(rq, cs); 1108 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { 1109 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1110 int err; 1111 1112 /* Magic required to prevent forcewake errors! 
*/ 1113 err = engine->emit_flush(rq, EMIT_INVALIDATE); 1114 if (err) 1115 return err; 1116 1117 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); 1118 if (IS_ERR(cs)) 1119 return PTR_ERR(cs); 1120 1121 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; 1122 for (i = GEN8_3LVL_PDPES; i--; ) { 1123 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 1124 1125 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); 1126 *cs++ = upper_32_bits(pd_daddr); 1127 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); 1128 *cs++ = lower_32_bits(pd_daddr); 1129 } 1130 *cs++ = MI_NOOP; 1131 intel_ring_advance(rq, cs); 1132 } 1133 1134 return 0; 1135 } 1136 1137 static bool skip_ppgtt_update(struct intel_context *ce, void *data) 1138 { 1139 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) 1140 return true; 1141 1142 if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) 1143 return false; 1144 1145 if (!atomic_read(&ce->pin_count)) 1146 return true; 1147 1148 /* ppGTT is not part of the legacy context image */ 1149 if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm))) 1150 return true; 1151 1152 return false; 1153 } 1154 1155 static int set_ppgtt(struct drm_i915_file_private *file_priv, 1156 struct i915_gem_context *ctx, 1157 struct drm_i915_gem_context_param *args) 1158 { 1159 struct i915_address_space *vm, *old; 1160 int err; 1161 1162 if (args->size) 1163 return -EINVAL; 1164 1165 if (!rcu_access_pointer(ctx->vm)) 1166 return -ENODEV; 1167 1168 if (upper_32_bits(args->value)) 1169 return -ENOENT; 1170 1171 rcu_read_lock(); 1172 vm = xa_load(&file_priv->vm_xa, args->value); 1173 if (vm && !kref_get_unless_zero(&vm->ref)) 1174 vm = NULL; 1175 rcu_read_unlock(); 1176 if (!vm) 1177 return -ENOENT; 1178 1179 err = mutex_lock_interruptible(&ctx->mutex); 1180 if (err) 1181 goto out; 1182 1183 if (i915_gem_context_is_closed(ctx)) { 1184 err = -ENOENT; 1185 goto unlock; 1186 } 1187 1188 if (vm == rcu_access_pointer(ctx->vm)) 1189 goto unlock; 1190 1191 /* Teardown the existing obj:vma cache, it will have to be rebuilt. */ 1192 lut_close(ctx); 1193 1194 old = __set_ppgtt(ctx, vm); 1195 1196 /* 1197 * We need to flush any requests using the current ppgtt before 1198 * we release it as the requests do not hold a reference themselves, 1199 * only indirectly through the context. 1200 */ 1201 err = context_barrier_task(ctx, ALL_ENGINES, 1202 skip_ppgtt_update, 1203 emit_ppgtt_update, 1204 set_ppgtt_barrier, 1205 old); 1206 if (err) { 1207 i915_vm_close(__set_ppgtt(ctx, old)); 1208 i915_vm_close(old); 1209 } 1210 1211 unlock: 1212 mutex_unlock(&ctx->mutex); 1213 out: 1214 i915_vm_put(vm); 1215 return err; 1216 } 1217 1218 static int 1219 user_to_context_sseu(struct drm_i915_private *i915, 1220 const struct drm_i915_gem_context_param_sseu *user, 1221 struct intel_sseu *context) 1222 { 1223 const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu; 1224 1225 /* No zeros in any field. */ 1226 if (!user->slice_mask || !user->subslice_mask || 1227 !user->min_eus_per_subslice || !user->max_eus_per_subslice) 1228 return -EINVAL; 1229 1230 /* Max > min. */ 1231 if (user->max_eus_per_subslice < user->min_eus_per_subslice) 1232 return -EINVAL; 1233 1234 /* 1235 * Some future proofing on the types since the uAPI is wider than the 1236 * current internal implementation. 
1237 */ 1238 if (overflows_type(user->slice_mask, context->slice_mask) || 1239 overflows_type(user->subslice_mask, context->subslice_mask) || 1240 overflows_type(user->min_eus_per_subslice, 1241 context->min_eus_per_subslice) || 1242 overflows_type(user->max_eus_per_subslice, 1243 context->max_eus_per_subslice)) 1244 return -EINVAL; 1245 1246 /* Check validity against hardware. */ 1247 if (user->slice_mask & ~device->slice_mask) 1248 return -EINVAL; 1249 1250 if (user->subslice_mask & ~device->subslice_mask[0]) 1251 return -EINVAL; 1252 1253 if (user->max_eus_per_subslice > device->max_eus_per_subslice) 1254 return -EINVAL; 1255 1256 context->slice_mask = user->slice_mask; 1257 context->subslice_mask = user->subslice_mask; 1258 context->min_eus_per_subslice = user->min_eus_per_subslice; 1259 context->max_eus_per_subslice = user->max_eus_per_subslice; 1260 1261 /* Part specific restrictions. */ 1262 if (IS_GEN(i915, 11)) { 1263 unsigned int hw_s = hweight8(device->slice_mask); 1264 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); 1265 unsigned int req_s = hweight8(context->slice_mask); 1266 unsigned int req_ss = hweight8(context->subslice_mask); 1267 1268 /* 1269 * Only full subslice enablement is possible if more than one 1270 * slice is turned on. 1271 */ 1272 if (req_s > 1 && req_ss != hw_ss_per_s) 1273 return -EINVAL; 1274 1275 /* 1276 * If more than four (SScount bitfield limit) subslices are 1277 * requested then the number has to be even. 1278 */ 1279 if (req_ss > 4 && (req_ss & 1)) 1280 return -EINVAL; 1281 1282 /* 1283 * If only one slice is enabled and subslice count is below the 1284 * device full enablement, it must be at most half of the all 1285 * available subslices. 1286 */ 1287 if (req_s == 1 && req_ss < hw_ss_per_s && 1288 req_ss > (hw_ss_per_s / 2)) 1289 return -EINVAL; 1290 1291 /* ABI restriction - VME use case only. */ 1292 1293 /* All slices or one slice only. */ 1294 if (req_s != 1 && req_s != hw_s) 1295 return -EINVAL; 1296 1297 /* 1298 * Half subslices or full enablement only when one slice is 1299 * enabled. 1300 */ 1301 if (req_s == 1 && 1302 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) 1303 return -EINVAL; 1304 1305 /* No EU configuration changes. */ 1306 if ((user->min_eus_per_subslice != 1307 device->max_eus_per_subslice) || 1308 (user->max_eus_per_subslice != 1309 device->max_eus_per_subslice)) 1310 return -EINVAL; 1311 } 1312 1313 return 0; 1314 } 1315 1316 static int set_sseu(struct i915_gem_context *ctx, 1317 struct drm_i915_gem_context_param *args) 1318 { 1319 struct drm_i915_private *i915 = ctx->i915; 1320 struct drm_i915_gem_context_param_sseu user_sseu; 1321 struct intel_context *ce; 1322 struct intel_sseu sseu; 1323 unsigned long lookup; 1324 int ret; 1325 1326 if (args->size < sizeof(user_sseu)) 1327 return -EINVAL; 1328 1329 if (!IS_GEN(i915, 11)) 1330 return -ENODEV; 1331 1332 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 1333 sizeof(user_sseu))) 1334 return -EFAULT; 1335 1336 if (user_sseu.rsvd) 1337 return -EINVAL; 1338 1339 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 1340 return -EINVAL; 1341 1342 lookup = 0; 1343 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 1344 lookup |= LOOKUP_USER_INDEX; 1345 1346 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 1347 if (IS_ERR(ce)) 1348 return PTR_ERR(ce); 1349 1350 /* Only render engine supports RPCS configuration. 
	 */
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = user_to_context_sseu(i915, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}

struct set_engines {
	struct i915_gem_context *ctx;
	struct i915_gem_engines *engines;
};

static int
set_engines__load_balance(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct intel_engine_cs *stack[16];
	struct intel_engine_cs **siblings;
	struct intel_context *ce;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (intel_uc_uses_guc_submission(&i915->gt.uc))
		return -ENODEV; /* not implemented yet */

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->engines->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->engines->num_engines);
	if (set->engines->engines[idx]) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	siblings = stack;
	if (num_siblings > ARRAY_SIZE(stack)) {
		siblings = kmalloc_array(num_siblings,
					 sizeof(*siblings),
					 GFP_KERNEL);
		if (!siblings)
			return -ENOMEM;
	}

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto out_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto out_siblings;
		}
	}

	ce = intel_execlists_create_virtual(siblings, n);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out_siblings;
	}

	intel_context_set_gem(ce, set->ctx);

	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
		intel_context_put(ce);
		err = -EEXIST;
		goto out_siblings;
	}

out_siblings:
	if (siblings != stack)
		kfree(siblings);

	return err;
}

static int
set_engines__bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *virtual;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if
(idx >= set->engines->num_engines) { 1490 drm_dbg(&i915->drm, 1491 "Invalid index for virtual engine: %d >= %d\n", 1492 idx, set->engines->num_engines); 1493 return -EINVAL; 1494 } 1495 1496 idx = array_index_nospec(idx, set->engines->num_engines); 1497 if (!set->engines->engines[idx]) { 1498 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx); 1499 return -EINVAL; 1500 } 1501 virtual = set->engines->engines[idx]->engine; 1502 1503 err = check_user_mbz(&ext->flags); 1504 if (err) 1505 return err; 1506 1507 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { 1508 err = check_user_mbz(&ext->mbz64[n]); 1509 if (err) 1510 return err; 1511 } 1512 1513 if (copy_from_user(&ci, &ext->master, sizeof(ci))) 1514 return -EFAULT; 1515 1516 master = intel_engine_lookup_user(i915, 1517 ci.engine_class, ci.engine_instance); 1518 if (!master) { 1519 drm_dbg(&i915->drm, 1520 "Unrecognised master engine: { class:%u, instance:%u }\n", 1521 ci.engine_class, ci.engine_instance); 1522 return -EINVAL; 1523 } 1524 1525 if (get_user(num_bonds, &ext->num_bonds)) 1526 return -EFAULT; 1527 1528 for (n = 0; n < num_bonds; n++) { 1529 struct intel_engine_cs *bond; 1530 1531 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) 1532 return -EFAULT; 1533 1534 bond = intel_engine_lookup_user(i915, 1535 ci.engine_class, 1536 ci.engine_instance); 1537 if (!bond) { 1538 drm_dbg(&i915->drm, 1539 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", 1540 n, ci.engine_class, ci.engine_instance); 1541 return -EINVAL; 1542 } 1543 1544 /* 1545 * A non-virtual engine has no siblings to choose between; and 1546 * a submit fence will always be directed to the one engine. 1547 */ 1548 if (intel_engine_is_virtual(virtual)) { 1549 err = intel_virtual_engine_attach_bond(virtual, 1550 master, 1551 bond); 1552 if (err) 1553 return err; 1554 } 1555 } 1556 1557 return 0; 1558 } 1559 1560 static const i915_user_extension_fn set_engines__extensions[] = { 1561 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance, 1562 [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond, 1563 }; 1564 1565 static int engines_notify(struct i915_sw_fence *fence, 1566 enum i915_sw_fence_notify state) 1567 { 1568 struct i915_gem_engines *engines = 1569 container_of(fence, typeof(*engines), fence); 1570 1571 switch (state) { 1572 case FENCE_COMPLETE: 1573 if (!list_empty(&engines->link)) { 1574 struct i915_gem_context *ctx = engines->ctx; 1575 unsigned long flags; 1576 1577 spin_lock_irqsave(&ctx->stale.lock, flags); 1578 list_del(&engines->link); 1579 spin_unlock_irqrestore(&ctx->stale.lock, flags); 1580 } 1581 break; 1582 1583 case FENCE_FREE: 1584 init_rcu_head(&engines->rcu); 1585 call_rcu(&engines->rcu, free_engines_rcu); 1586 break; 1587 } 1588 1589 return NOTIFY_DONE; 1590 } 1591 1592 static void engines_idle_release(struct i915_gem_engines *engines) 1593 { 1594 struct i915_gem_engines_iter it; 1595 struct intel_context *ce; 1596 unsigned long flags; 1597 1598 GEM_BUG_ON(!engines); 1599 i915_sw_fence_init(&engines->fence, engines_notify); 1600 1601 INIT_LIST_HEAD(&engines->link); 1602 spin_lock_irqsave(&engines->ctx->stale.lock, flags); 1603 if (!i915_gem_context_is_closed(engines->ctx)) 1604 list_add(&engines->link, &engines->ctx->stale.engines); 1605 spin_unlock_irqrestore(&engines->ctx->stale.lock, flags); 1606 if (list_empty(&engines->link)) /* raced, already closed */ 1607 goto kill; 1608 1609 for_each_gem_engine(ce, engines, it) { 1610 struct dma_fence *fence; 1611 int err; 1612 1613 if (!ce->timeline) 1614 continue; 1615 1616 
fence = i915_active_fence_get(&ce->timeline->last_request); 1617 if (!fence) 1618 continue; 1619 1620 err = i915_sw_fence_await_dma_fence(&engines->fence, 1621 fence, 0, 1622 GFP_KERNEL); 1623 1624 dma_fence_put(fence); 1625 if (err < 0) 1626 goto kill; 1627 } 1628 goto out; 1629 1630 kill: 1631 kill_engines(engines); 1632 out: 1633 i915_sw_fence_commit(&engines->fence); 1634 } 1635 1636 static int 1637 set_engines(struct i915_gem_context *ctx, 1638 const struct drm_i915_gem_context_param *args) 1639 { 1640 struct drm_i915_private *i915 = ctx->i915; 1641 struct i915_context_param_engines __user *user = 1642 u64_to_user_ptr(args->value); 1643 struct set_engines set = { .ctx = ctx }; 1644 unsigned int num_engines, n; 1645 u64 extensions; 1646 int err; 1647 1648 if (!args->size) { /* switch back to legacy user_ring_map */ 1649 if (!i915_gem_context_user_engines(ctx)) 1650 return 0; 1651 1652 set.engines = default_engines(ctx); 1653 if (IS_ERR(set.engines)) 1654 return PTR_ERR(set.engines); 1655 1656 goto replace; 1657 } 1658 1659 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines))); 1660 if (args->size < sizeof(*user) || 1661 !IS_ALIGNED(args->size, sizeof(*user->engines))) { 1662 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n", 1663 args->size); 1664 return -EINVAL; 1665 } 1666 1667 /* 1668 * Note that I915_EXEC_RING_MASK limits execbuf to only using the 1669 * first 64 engines defined here. 1670 */ 1671 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); 1672 1673 set.engines = kmalloc(struct_size(set.engines, engines, num_engines), 1674 GFP_KERNEL); 1675 if (!set.engines) 1676 return -ENOMEM; 1677 1678 set.engines->ctx = ctx; 1679 1680 for (n = 0; n < num_engines; n++) { 1681 struct i915_engine_class_instance ci; 1682 struct intel_engine_cs *engine; 1683 struct intel_context *ce; 1684 1685 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { 1686 __free_engines(set.engines, n); 1687 return -EFAULT; 1688 } 1689 1690 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && 1691 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) { 1692 set.engines->engines[n] = NULL; 1693 continue; 1694 } 1695 1696 engine = intel_engine_lookup_user(ctx->i915, 1697 ci.engine_class, 1698 ci.engine_instance); 1699 if (!engine) { 1700 drm_dbg(&i915->drm, 1701 "Invalid engine[%d]: { class:%d, instance:%d }\n", 1702 n, ci.engine_class, ci.engine_instance); 1703 __free_engines(set.engines, n); 1704 return -ENOENT; 1705 } 1706 1707 ce = intel_context_create(engine); 1708 if (IS_ERR(ce)) { 1709 __free_engines(set.engines, n); 1710 return PTR_ERR(ce); 1711 } 1712 1713 intel_context_set_gem(ce, ctx); 1714 1715 set.engines->engines[n] = ce; 1716 } 1717 set.engines->num_engines = num_engines; 1718 1719 err = -EFAULT; 1720 if (!get_user(extensions, &user->extensions)) 1721 err = i915_user_extensions(u64_to_user_ptr(extensions), 1722 set_engines__extensions, 1723 ARRAY_SIZE(set_engines__extensions), 1724 &set); 1725 if (err) { 1726 free_engines(set.engines); 1727 return err; 1728 } 1729 1730 replace: 1731 mutex_lock(&ctx->engines_mutex); 1732 if (args->size) 1733 i915_gem_context_set_user_engines(ctx); 1734 else 1735 i915_gem_context_clear_user_engines(ctx); 1736 set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1); 1737 mutex_unlock(&ctx->engines_mutex); 1738 1739 /* Keep track of old engine sets for kill_context() */ 1740 engines_idle_release(set.engines); 1741 1742 return 0; 1743 } 1744 1745 static struct i915_gem_engines * 1746 __copy_engines(struct 
i915_gem_engines *e) 1747 { 1748 struct i915_gem_engines *copy; 1749 unsigned int n; 1750 1751 copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); 1752 if (!copy) 1753 return ERR_PTR(-ENOMEM); 1754 1755 for (n = 0; n < e->num_engines; n++) { 1756 if (e->engines[n]) 1757 copy->engines[n] = intel_context_get(e->engines[n]); 1758 else 1759 copy->engines[n] = NULL; 1760 } 1761 copy->num_engines = n; 1762 1763 return copy; 1764 } 1765 1766 static int 1767 get_engines(struct i915_gem_context *ctx, 1768 struct drm_i915_gem_context_param *args) 1769 { 1770 struct i915_context_param_engines __user *user; 1771 struct i915_gem_engines *e; 1772 size_t n, count, size; 1773 int err = 0; 1774 1775 err = mutex_lock_interruptible(&ctx->engines_mutex); 1776 if (err) 1777 return err; 1778 1779 e = NULL; 1780 if (i915_gem_context_user_engines(ctx)) 1781 e = __copy_engines(i915_gem_context_engines(ctx)); 1782 mutex_unlock(&ctx->engines_mutex); 1783 if (IS_ERR_OR_NULL(e)) { 1784 args->size = 0; 1785 return PTR_ERR_OR_ZERO(e); 1786 } 1787 1788 count = e->num_engines; 1789 1790 /* Be paranoid in case we have an impedance mismatch */ 1791 if (!check_struct_size(user, engines, count, &size)) { 1792 err = -EINVAL; 1793 goto err_free; 1794 } 1795 if (overflows_type(size, args->size)) { 1796 err = -EINVAL; 1797 goto err_free; 1798 } 1799 1800 if (!args->size) { 1801 args->size = size; 1802 goto err_free; 1803 } 1804 1805 if (args->size < size) { 1806 err = -EINVAL; 1807 goto err_free; 1808 } 1809 1810 user = u64_to_user_ptr(args->value); 1811 if (!access_ok(user, size)) { 1812 err = -EFAULT; 1813 goto err_free; 1814 } 1815 1816 if (put_user(0, &user->extensions)) { 1817 err = -EFAULT; 1818 goto err_free; 1819 } 1820 1821 for (n = 0; n < count; n++) { 1822 struct i915_engine_class_instance ci = { 1823 .engine_class = I915_ENGINE_CLASS_INVALID, 1824 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE, 1825 }; 1826 1827 if (e->engines[n]) { 1828 ci.engine_class = e->engines[n]->engine->uabi_class; 1829 ci.engine_instance = e->engines[n]->engine->uabi_instance; 1830 } 1831 1832 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { 1833 err = -EFAULT; 1834 goto err_free; 1835 } 1836 } 1837 1838 args->size = size; 1839 1840 err_free: 1841 free_engines(e); 1842 return err; 1843 } 1844 1845 static int 1846 set_persistence(struct i915_gem_context *ctx, 1847 const struct drm_i915_gem_context_param *args) 1848 { 1849 if (args->size) 1850 return -EINVAL; 1851 1852 return __context_set_persistence(ctx, args->value); 1853 } 1854 1855 static void __apply_priority(struct intel_context *ce, void *arg) 1856 { 1857 struct i915_gem_context *ctx = arg; 1858 1859 if (!intel_engine_has_semaphores(ce->engine)) 1860 return; 1861 1862 if (ctx->sched.priority >= I915_PRIORITY_NORMAL) 1863 intel_context_set_use_semaphores(ce); 1864 else 1865 intel_context_clear_use_semaphores(ce); 1866 } 1867 1868 static int set_priority(struct i915_gem_context *ctx, 1869 const struct drm_i915_gem_context_param *args) 1870 { 1871 s64 priority = args->value; 1872 1873 if (args->size) 1874 return -EINVAL; 1875 1876 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) 1877 return -ENODEV; 1878 1879 if (priority > I915_CONTEXT_MAX_USER_PRIORITY || 1880 priority < I915_CONTEXT_MIN_USER_PRIORITY) 1881 return -EINVAL; 1882 1883 if (priority > I915_CONTEXT_DEFAULT_PRIORITY && 1884 !capable(CAP_SYS_NICE)) 1885 return -EPERM; 1886 1887 ctx->sched.priority = I915_USER_PRIORITY(priority); 1888 context_apply_all(ctx, __apply_priority, ctx); 1889 
1890 return 0; 1891 } 1892 1893 static int ctx_setparam(struct drm_i915_file_private *fpriv, 1894 struct i915_gem_context *ctx, 1895 struct drm_i915_gem_context_param *args) 1896 { 1897 int ret = 0; 1898 1899 switch (args->param) { 1900 case I915_CONTEXT_PARAM_NO_ZEROMAP: 1901 if (args->size) 1902 ret = -EINVAL; 1903 else if (args->value) 1904 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 1905 else 1906 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 1907 break; 1908 1909 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 1910 if (args->size) 1911 ret = -EINVAL; 1912 else if (args->value) 1913 i915_gem_context_set_no_error_capture(ctx); 1914 else 1915 i915_gem_context_clear_no_error_capture(ctx); 1916 break; 1917 1918 case I915_CONTEXT_PARAM_BANNABLE: 1919 if (args->size) 1920 ret = -EINVAL; 1921 else if (!capable(CAP_SYS_ADMIN) && !args->value) 1922 ret = -EPERM; 1923 else if (args->value) 1924 i915_gem_context_set_bannable(ctx); 1925 else 1926 i915_gem_context_clear_bannable(ctx); 1927 break; 1928 1929 case I915_CONTEXT_PARAM_RECOVERABLE: 1930 if (args->size) 1931 ret = -EINVAL; 1932 else if (args->value) 1933 i915_gem_context_set_recoverable(ctx); 1934 else 1935 i915_gem_context_clear_recoverable(ctx); 1936 break; 1937 1938 case I915_CONTEXT_PARAM_PRIORITY: 1939 ret = set_priority(ctx, args); 1940 break; 1941 1942 case I915_CONTEXT_PARAM_SSEU: 1943 ret = set_sseu(ctx, args); 1944 break; 1945 1946 case I915_CONTEXT_PARAM_VM: 1947 ret = set_ppgtt(fpriv, ctx, args); 1948 break; 1949 1950 case I915_CONTEXT_PARAM_ENGINES: 1951 ret = set_engines(ctx, args); 1952 break; 1953 1954 case I915_CONTEXT_PARAM_PERSISTENCE: 1955 ret = set_persistence(ctx, args); 1956 break; 1957 1958 case I915_CONTEXT_PARAM_BAN_PERIOD: 1959 default: 1960 ret = -EINVAL; 1961 break; 1962 } 1963 1964 return ret; 1965 } 1966 1967 struct create_ext { 1968 struct i915_gem_context *ctx; 1969 struct drm_i915_file_private *fpriv; 1970 }; 1971 1972 static int create_setparam(struct i915_user_extension __user *ext, void *data) 1973 { 1974 struct drm_i915_gem_context_create_ext_setparam local; 1975 const struct create_ext *arg = data; 1976 1977 if (copy_from_user(&local, ext, sizeof(local))) 1978 return -EFAULT; 1979 1980 if (local.param.ctx_id) 1981 return -EINVAL; 1982 1983 return ctx_setparam(arg->fpriv, arg->ctx, &local.param); 1984 } 1985 1986 static int clone_engines(struct i915_gem_context *dst, 1987 struct i915_gem_context *src) 1988 { 1989 struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 1990 struct i915_gem_engines *clone; 1991 bool user_engines; 1992 unsigned long n; 1993 1994 clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); 1995 if (!clone) 1996 goto err_unlock; 1997 1998 clone->ctx = dst; 1999 2000 for (n = 0; n < e->num_engines; n++) { 2001 struct intel_engine_cs *engine; 2002 2003 if (!e->engines[n]) { 2004 clone->engines[n] = NULL; 2005 continue; 2006 } 2007 engine = e->engines[n]->engine; 2008 2009 /* 2010 * Virtual engines are singletons; they can only exist 2011 * inside a single context, because they embed their 2012 * HW context... As each virtual context implies a single 2013 * timeline (each engine can only dequeue a single request 2014 * at any time), it would be surprising for two contexts 2015 * to use the same engine. So let's create a copy of 2016 * the virtual engine instead. 
2017 */ 2018 if (intel_engine_is_virtual(engine)) 2019 clone->engines[n] = 2020 intel_execlists_clone_virtual(engine); 2021 else 2022 clone->engines[n] = intel_context_create(engine); 2023 if (IS_ERR_OR_NULL(clone->engines[n])) { 2024 __free_engines(clone, n); 2025 goto err_unlock; 2026 } 2027 2028 intel_context_set_gem(clone->engines[n], dst); 2029 } 2030 clone->num_engines = n; 2031 2032 user_engines = i915_gem_context_user_engines(src); 2033 i915_gem_context_unlock_engines(src); 2034 2035 /* Serialised by constructor */ 2036 free_engines(__context_engines_static(dst)); 2037 RCU_INIT_POINTER(dst->engines, clone); 2038 if (user_engines) 2039 i915_gem_context_set_user_engines(dst); 2040 else 2041 i915_gem_context_clear_user_engines(dst); 2042 return 0; 2043 2044 err_unlock: 2045 i915_gem_context_unlock_engines(src); 2046 return -ENOMEM; 2047 } 2048 2049 static int clone_flags(struct i915_gem_context *dst, 2050 struct i915_gem_context *src) 2051 { 2052 dst->user_flags = src->user_flags; 2053 return 0; 2054 } 2055 2056 static int clone_schedattr(struct i915_gem_context *dst, 2057 struct i915_gem_context *src) 2058 { 2059 dst->sched = src->sched; 2060 return 0; 2061 } 2062 2063 static int clone_sseu(struct i915_gem_context *dst, 2064 struct i915_gem_context *src) 2065 { 2066 struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 2067 struct i915_gem_engines *clone; 2068 unsigned long n; 2069 int err; 2070 2071 /* no locking required; sole access under constructor*/ 2072 clone = __context_engines_static(dst); 2073 if (e->num_engines != clone->num_engines) { 2074 err = -EINVAL; 2075 goto unlock; 2076 } 2077 2078 for (n = 0; n < e->num_engines; n++) { 2079 struct intel_context *ce = e->engines[n]; 2080 2081 if (clone->engines[n]->engine->class != ce->engine->class) { 2082 /* Must have compatible engine maps! 
			err = -EINVAL;
			goto unlock;
		}

		/* serialises with set_sseu */
		err = intel_context_lock_pinned(ce);
		if (err)
			goto unlock;

		clone->engines[n]->sseu = ce->sseu;
		intel_context_unlock_pinned(ce);
	}

	err = 0;
unlock:
	i915_gem_context_unlock_engines(src);
	return err;
}

static int clone_timeline(struct i915_gem_context *dst,
			  struct i915_gem_context *src)
{
	if (src->timeline)
		__assign_timeline(dst, src->timeline);

	return 0;
}

static int clone_vm(struct i915_gem_context *dst,
		    struct i915_gem_context *src)
{
	struct i915_address_space *vm;
	int err = 0;

	if (!rcu_access_pointer(src->vm))
		return 0;

	rcu_read_lock();
	vm = context_get_vm_rcu(src);
	rcu_read_unlock();

	if (!mutex_lock_interruptible(&dst->mutex)) {
		__assign_ppgtt(dst, vm);
		mutex_unlock(&dst->mutex);
	} else {
		err = -EINTR;
	}

	i915_vm_put(vm);
	return err;
}

static int create_clone(struct i915_user_extension __user *ext, void *data)
{
	static int (* const fn[])(struct i915_gem_context *dst,
				  struct i915_gem_context *src) = {
#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
		MAP(ENGINES, clone_engines),
		MAP(FLAGS, clone_flags),
		MAP(SCHEDATTR, clone_schedattr),
		MAP(SSEU, clone_sseu),
		MAP(TIMELINE, clone_timeline),
		MAP(VM, clone_vm),
#undef MAP
	};
	struct drm_i915_gem_context_create_ext_clone local;
	const struct create_ext *arg = data;
	struct i915_gem_context *dst = arg->ctx;
	struct i915_gem_context *src;
	int err, bit;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
		     I915_CONTEXT_CLONE_UNKNOWN);

	if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
		return -EINVAL;

	if (local.rsvd)
		return -EINVAL;

	rcu_read_lock();
	src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
	rcu_read_unlock();
	if (!src)
		return -ENOENT;

	GEM_BUG_ON(src == dst);

	for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
		if (!(local.flags & BIT(bit)))
			continue;

		err = fn[bit](dst, src);
		if (err)
			return err;
	}

	return 0;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
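		/*
		 * A banned client fails context creation with -EIO, the
		 * same errno a terminally wedged GPU reports just above.
		 */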
		return -EIO;
	}

	ext_data.ctx = i915_gem_create_context(i915, args->flags);
	if (IS_ERR(ext_data.ctx))
		return PTR_ERR(ext_data.ctx);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = id;
	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);

	return 0;

err_ctx:
	context_close(ext_data.ctx);
	return ret;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	context_close(ctx);
	return 0;
}

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->size = 0;
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		rcu_read_lock();
		if (rcu_access_pointer(ctx->vm))
			args->value = rcu_dereference(ctx->vm)->total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		rcu_read_unlock();
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = get_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = ctx_setparam(file_priv, ctx, args);

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
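	 *
	 * A rough sketch of that alternative, assuming a hypothetical
	 * seqlock_t (say, ctx->hangstats_lock) taken for write wherever
	 * the counters are updated:
	 *
	 *	unsigned int seq;
	 *	do {
	 *		seq = read_seqbegin(&ctx->hangstats_lock);
	 *		args->batch_active = atomic_read(&ctx->guilty_count);
	 *		args->batch_pending = atomic_read(&ctx->active_count);
	 *	} while (read_seqretry(&ctx->hangstats_lock, seq));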
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

static void i915_global_gem_context_shrink(void)
{
	kmem_cache_shrink(global.slab_luts);
}

static void i915_global_gem_context_exit(void)
{
	kmem_cache_destroy(global.slab_luts);
}

static struct i915_global_gem_context global = { {
	.shrink = i915_global_gem_context_shrink,
	.exit = i915_global_gem_context_exit,
} };

int __init i915_global_gem_context_init(void)
{
	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!global.slab_luts)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}