// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (q->vm)
		xe_vm_put(q->vm);

	if (q->xef)
		xe_file_put(q->xef);

	kfree(q);
}

static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);
	INIT_LIST_HEAD(&q->hw_engine_group_link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * may set q->usm, must come before xe_lrc_create(),
		 * may overwrite q->sched_props, must come before q->ops->init()
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}

static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	struct xe_vm *vm = q->vm;
	int i, err;

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			return err;
	}

	for (i = 0; i < q->width; ++i) {
		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec);
		if (IS_ERR(q->lrc[i])) {
			err = PTR_ERR(q->lrc[i]);
			goto err_unlock;
		}
	}

	if (vm)
		xe_vm_unlock(vm);

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	return 0;

err_unlock:
	if (vm)
		xe_vm_unlock(vm);
err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}
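
/**
 * xe_exec_queue_create() - Create an exec queue on a specific hw engine
 * @xe: xe device
 * @vm: VM the exec queue runs on, or NULL for kernel-internal queues
 * @logical_mask: mask of logical engine instances the queue may be placed on
 * @width: number of LRCs, i.e. the parallel submission width
 * @hwe: hw engine providing the class, GT and scheduling defaults
 * @flags: EXEC_QUEUE_FLAG_* creation flags
 * @extensions: user pointer to the first extension in a chain, or 0
 *
 * Returns: created exec queue on success, ERR_PTR on failure.
 */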
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	/* VMs for GSCCS queues (and only those) must have the XE_VM_FLAG_GSC flag */
	xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0)));

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	err = __xe_exec_queue_init(q);
	if (err)
		goto err_post_alloc;

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}
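
/**
 * xe_exec_queue_create_class() - Create an exec queue for an engine class
 * @xe: xe device
 * @gt: GT the engines are selected from
 * @vm: VM the exec queue runs on, or NULL
 * @class: engine class of the exec queue
 * @flags: EXEC_QUEUE_FLAG_* creation flags
 * @extensions: user pointer to the first extension in a chain, or 0
 *
 * Builds a logical mask covering every non-reserved engine of @class on @gt
 * and creates a width-1 exec queue on it.
 *
 * Returns: created exec queue on success, ERR_PTR(-ENODEV) if @gt has no
 * usable engine of @class, other ERR_PTR on failure.
 */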
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
}

/**
 * xe_exec_queue_create_bind() - Create bind exec queue.
 * @xe: Xe device.
 * @tile: tile which bind exec queue belongs to.
 * @flags: exec queue creation flags
 * @extensions: exec queue creation extensions
 *
 * Normalize bind exec queue creation. Bind exec queue is tied to migration VM
 * for access to physical memory required for page table programming. On
 * faulting devices the reserved copy engine instance must be used to avoid
 * deadlocking (user binds cannot get stuck behind faults as kernel binds which
 * resolve faults depend on user binds). On non-faulting devices any copy engine
 * can be used.
 *
 * Returns exec queue on success, ERR_PTR on failure
 */
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						u32 flags, u64 extensions)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_exec_queue *q;
	struct xe_vm *migrate_vm;

	migrate_vm = xe_migrate_get_vm(tile->migrate);
	if (xe->info.has_usm) {
		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
							   XE_ENGINE_CLASS_COPY,
							   gt->usm.reserved_bcs_instance,
							   false);

		if (!hwe) {
			xe_vm_put(migrate_vm);
			return ERR_PTR(-EINVAL);
		}

		q = xe_exec_queue_create(xe, migrate_vm,
					 BIT(hwe->logical_instance), 1, hwe,
					 flags, extensions);
	} else {
		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
					       XE_ENGINE_CLASS_COPY, flags,
					       extensions);
	}
	xe_vm_put(migrate_vm);

	return q;
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);

void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	/*
	 * Before releasing our ref to lrc and xef, accumulate our run ticks
	 * and wakeup any waiters.
	 */
	xe_exec_queue_update_run_ticks(q);
	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
		wake_up_var(&q->xef->exec_queue.pending_removal);

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);

	__xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}
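
/**
 * xe_exec_queue_lookup() - Look up an exec queue by user id
 * @xef: xe file the exec queue was created on
 * @id: exec queue id returned by the create ioctl
 *
 * Takes a reference on the exec queue which the caller must drop with
 * xe_exec_queue_put().
 *
 * Returns: exec queue on success, NULL if no exec queue with @id exists.
 */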
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};
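
/*
 * User extensions are passed as a userspace singly linked list of
 * struct drm_xe_user_extension headers, walked recursively by
 * exec_queue_user_extensions() below. Illustrative userspace sketch (not a
 * complete program; struct and define names per uapi/drm/xe_drm.h) chaining
 * two set-property extensions onto queue creation:
 *
 *	struct drm_xe_ext_set_property prio = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *		.value = 0,
 *	};
 *	struct drm_xe_ext_set_property tslice = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE,
 *		.value = 1000,
 *	};
 *	struct drm_xe_exec_queue_create create = { ... };
 *
 *	prio.base.next_extension = (uintptr_t)&tslice;
 *	create.extensions = (uintptr_t)&prio;
 *
 * The priority value is range-checked by exec_queue_set_priority() above and
 * the timeslice value is in microseconds, see exec_queue_set_timeslice().
 */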
#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}

static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = xe_hw_engine_lookup(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm;
	struct xe_gt *gt;
	struct xe_tile *tile;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;
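
	/*
	 * A VM bind queue is not tied to the user-supplied VM or to a user
	 * visible engine instance: one bind exec queue is created per tile,
	 * with the tile 0 queue acting as parent and the remaining ones
	 * linked to it through multi_gt_list.
	 */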
	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		if (XE_IOCTL_DBG(xe, args->width != 1) ||
		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
			return -EINVAL;

		for_each_tile(tile, xe, id) {
			struct xe_exec_queue *new;
			u32 flags = EXEC_QUEUE_FLAG_VM;

			if (id)
				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;

			new = xe_exec_queue_create_bind(xe, tile, flags,
							args->extensions);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		gt = xe_device_get_gt(xe, eci[0].gt_id);
		logical_mask = calc_validate_logical_mask(xe, gt, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = xe_hw_engine_lookup(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, 0,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}

		if (q->vm && q->hwe->hw_engine_group) {
			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
			if (err)
				goto put_exec_queue;
		}
	}

	q->xef = xe_file_get(xef);

	/* user id alloc must always be last in ioctl to prevent UAF */
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}
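
/*
 * Illustrative userspace sketch (not a complete program; names per
 * uapi/drm/xe_drm.h) of querying the ban state handled by the ioctl below:
 *
 *	struct drm_xe_exec_queue_get_property get = {
 *		.exec_queue_id = exec_queue_id,
 *		.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &get);
 *
 * A non-zero get.value afterwards means the queue has been reset/banned.
 */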
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}
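
/*
 * In-flight job count: the last seqno handed out (fence_ctx.next_seqno - 1)
 * minus the last seqno completed in the LRC.
 */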
static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc[0];
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec queue
 * from hw
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save run ticks
 * calculated by using the delta from last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	struct xe_lrc *lrc;
	u32 old_ts, new_ts;
	int idx;

	/*
	 * Jobs that are executed by the kernel don't have a corresponding
	 * xe_file and thus are not accounted.
	 */
	if (!q->xef)
		return;

	/* Synchronize with unbind while holding the xe file open */
	if (!drm_dev_enter(&xe->drm, &idx))
		return;
	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate that below by multiplying by
	 * width - this may introduce errors if that premise is not true and
	 * they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them in different time could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;

	drm_dev_exit(idx);
}

/**
 * xe_exec_queue_kill - permanently stop all execution from an exec queue
 * @q: The exec queue
 *
 * This function permanently stops all activity on an exec queue. If the queue
 * is actively executing on the HW, it will be kicked off the engine; any
 * pending jobs are discarded and all future submissions are rejected.
 * This function is safe to call multiple times.
 */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	if (q)
		atomic_inc(&xef->exec_queue.pending_removal);
	mutex_unlock(&xef->exec_queue.lock);

	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (q->vm && q->hwe->hw_engine_group)
		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}
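
/*
 * The last fence of a VM bind queue is protected by vm->lock; for other exec
 * queues it is protected by the VM's dma-resv together with the hw engine
 * group's mode_sem.
 */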
static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM) {
		lockdep_assert_held(&vm->lock);
	} else {
		xe_vm_assert_held(vm);
		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
	}
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put_unlocked(q);
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref. Only safe to be called in the context of
 * resuming the hw engine group's long-running exec queue, when the group
 * semaphore is held.
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
							   struct xe_vm *vm)
{
	struct dma_fence *fence;

	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put_unlocked(q);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Takes a reference on @fence; when the
 * engine is closed, xe_exec_queue_last_fence_put() should be called to drop it.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}

/**
 * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Returns:
 * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
 */
int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence *fence;
	int err = 0;

	fence = xe_exec_queue_last_fence_get(q, vm);
	if (fence) {
		err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
			0 : -ETIME;
		dma_fence_put(fence);
	}

	return err;
}