// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (q->vm)
		xe_vm_put(q->vm);

	if (q->xef)
		xe_file_put(q->xef);

	kfree(q);
}

static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * may set q->usm, must come before xe_lrc_create(),
		 * may overwrite q->sched_props, must come before q->ops->init()
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}

static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	int i, err;

	for (i = 0; i < q->width; ++i) {
		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K);
		if (IS_ERR(q->lrc[i])) {
			err = PTR_ERR(q->lrc[i]);
			goto err_lrc;
		}
	}

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	return 0;

err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}

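/**
 * xe_exec_queue_create() - Allocate and initialize an exec queue
 * @xe: Xe device
 * @vm: VM the queue targets, or NULL if the queue is not bound to a VM
 * @logical_mask: Mask of logical engine instances the queue may be placed on
 * @width: Number of engines (LRCs) submitted to in parallel
 * @hwe: Hardware engine backing the queue
 * @flags: EXEC_QUEUE_FLAG_* flags
 * @extensions: User pointer to a chain of drm_xe_user_extension structs, or 0
 *
 * Return: Pointer to the new exec queue on success, ERR_PTR() on failure.
 */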
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			goto err_post_alloc;
	}

	err = __xe_exec_queue_init(q);
	if (vm)
		xe_vm_unlock(vm);
	if (err)
		goto err_post_alloc;

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}

struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class, u32 flags)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0);
}

void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);
	__xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

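/**
 * xe_exec_queue_lookup() - Look up an exec queue by ID
 * @xef: Xe file the queue was created on
 * @id: Exec queue ID returned at creation
 *
 * Return: The exec queue with an extra reference taken (drop it with
 * xe_exec_queue_put()), or NULL if no queue with that ID exists.
 */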
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

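/*
 * User extensions arrive as a chain: each drm_xe_user_extension names an
 * extension type and points at the next node via next_extension (0 terminates
 * the chain).  For example, a drm_xe_ext_set_property selecting PRIORITY can
 * chain to a second one selecting TIMESLICE.  The recursive walk in
 * exec_queue_user_extensions() follows that chain and is bounded to
 * MAX_USER_EXTENSIONS nodes.
 */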
#define MAX_USER_EXTENSIONS 16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

static struct xe_hw_engine *
find_hw_engine(struct xe_device *xe,
	       struct drm_xe_engine_class_instance eci)
{
	u32 idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (eci.gt_id >= xe->info.gt_count)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
			       user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}

static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
					struct drm_xe_engine_class_instance *eci,
					u16 width, u16 num_placements)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	if (XE_IOCTL_DBG(xe, width != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, num_placements != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
		return 0;

	eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class ==
		    user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
			logical_mask |= BIT(hwe->logical_instance);
	}

	return logical_mask;
}

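/*
 * Illustrative layout: the instance array is indexed as eci[j * width + i],
 * so for width == 2 and num_placements == 2 it reads
 * { p0.engine0, p0.engine1, p1.engine0, p1.engine1 }.  Every placement must
 * use logically contiguous instances, e.g. placement 0 on instances (0, 1)
 * and placement 1 on (2, 3); the current_mask == prev_mask << 1 check below
 * enforces that each engine slot sits one logical instance above the
 * previous slot for all placements.
 */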
static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = find_hw_engine(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

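/**
 * xe_exec_queue_create_ioctl() - Create an exec queue on behalf of userspace
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_exec_queue_create
 * @file: DRM file
 *
 * Return: 0 on success, negative error code on failure.
 */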
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm, *migrate_vm;
	struct xe_gt *gt;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		for_each_gt(gt, xe, id) {
			struct xe_exec_queue *new;
			u32 flags;

			if (xe_gt_is_media_type(gt))
				continue;

			eci[0].gt_id = gt->info.id;
			logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
								    args->width,
								    args->num_placements);
			if (XE_IOCTL_DBG(xe, !logical_mask))
				return -EINVAL;

			hwe = find_hw_engine(xe, eci[0]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return -EINVAL;

			/* The migration vm doesn't hold rpm ref */
			xe_pm_runtime_get_noresume(xe);

			flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);

			migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
			new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
						   args->width, hwe, flags,
						   args->extensions);

			xe_pm_runtime_put(xe); /* now held by engine */

			xe_vm_put(migrate_vm);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		gt = xe_device_get_gt(xe, eci[0].gt_id);
		logical_mask = calc_validate_logical_mask(xe, gt, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = find_hw_engine(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, 0,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);
			spin_lock_init(&q->lr.lock);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}
	}

	mutex_lock(&xef->exec_queue.lock);
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	mutex_unlock(&xef->exec_queue.lock);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;
	q->xef = xe_file_get(xef);

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}

int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

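/*
 * The LRC seqno records the most recently completed job, while
 * fence_ctx.next_seqno is the seqno the next submission will be assigned, so
 * the difference minus one is the number of jobs still in flight on the queue.
 */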
static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc[0];
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec queue
 * from hw
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save run ticks
 * calculated by using the delta from last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_file *xef;
	struct xe_lrc *lrc;
	u32 old_ts, new_ts;

	/*
	 * Jobs that are run during driver load may use an exec_queue, but are
	 * not associated with a user xe file, so avoid accumulating busyness
	 * for kernel specific work.
	 */
	if (!q->vm || !q->vm->xef)
		return;

	xef = q->vm->xef;

	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate that below by multiplying by
	 * width - this may introduce errors if that premise is not true and
	 * they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them in different time could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
}

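/**
 * xe_exec_queue_kill() - Stop execution on an exec queue
 * @q: The exec queue
 *
 * Kill @q and any exec queues on its multi-GT list: each is killed via the
 * backend ->kill() hook and removed from the VM's compute exec queue list.
 */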
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	mutex_unlock(&xef->exec_queue.lock);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM)
		lockdep_assert_held(&vm->lock);
	else
		xe_vm_assert_held(vm);
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Increases reference count for fence, when
 * closing engine xe_exec_queue_last_fence_put should be called.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}