// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"
#include "xe_pxp.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
	if (q->vm)
		xe_vm_put(q->vm);

	if (q->xef)
		xe_file_put(q->xef);

	kfree(q);
}

static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);
	INIT_LIST_HEAD(&q->hw_engine_group_link);
	INIT_LIST_HEAD(&q->pxp.link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * may set q->usm, must come before xe_lrc_create(),
		 * may overwrite q->sched_props, must come before q->ops->init()
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}
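
/*
 * Second-stage init: create one LRC (with a 16K ring) per placement in the
 * queue's width and hand the queue to the submission backend via
 * q->ops->init(). Any LRCs already created are unwound on failure.
 */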
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	int i, err;
	u32 flags = 0;

	/*
	 * PXP workloads executing on RCS or CCS must run in isolation (i.e. no
	 * other workload can use the EUs at the same time). On MTL this is done
	 * by setting the RUNALONE bit in the LRC, while starting on Xe2 there
	 * is a dedicated bit for it.
	 */
	if (xe_exec_queue_uses_pxp(q) &&
	    (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
		if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
			flags |= XE_LRC_CREATE_PXP;
		else
			flags |= XE_LRC_CREATE_RUNALONE;
	}

	for (i = 0; i < q->width; ++i) {
		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
		if (IS_ERR(q->lrc[i])) {
			err = PTR_ERR(q->lrc[i]);
			goto err_lrc;
		}
	}

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	return 0;

err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}

static void __xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	q->ops->fini(q);

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);
}

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	/* VMs for GSCCS queues (and only those) must have the XE_VM_FLAG_GSC flag */
	xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0)));

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	err = __xe_exec_queue_init(q);
	if (err)
		goto err_post_alloc;

	/*
	 * We can only add the queue to the PXP list after the init is complete,
	 * because the PXP termination can call exec_queue_kill and that will
	 * go bad if the queue is only half-initialized. This means that we
	 * can't do it when we handle the PXP extension in __xe_exec_queue_alloc
	 * and we need to do it here instead.
	 */
	if (xe_exec_queue_uses_pxp(q)) {
		err = xe_pxp_exec_queue_add(xe->pxp, q);
		if (err)
			goto err_post_init;
	}

	return q;

err_post_init:
	__xe_exec_queue_fini(q);
err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create, ERRNO);

struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
}
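
/*
 * Example (illustrative only, not part of the original driver): a kernel
 * internal user that needs a queue on a specific engine would typically do
 * something like:
 *
 *	struct xe_exec_queue *q;
 *
 *	q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1, hwe,
 *				 EXEC_QUEUE_FLAG_KERNEL, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 * xe_exec_queue_create_class() is the convenience variant for when any
 * non-reserved engine of a given class on a GT will do.
 */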

/**
 * xe_exec_queue_create_bind() - Create bind exec queue.
 * @xe: Xe device.
 * @tile: tile which bind exec queue belongs to.
 * @flags: exec queue creation flags
 * @extensions: exec queue creation extensions
 *
 * Normalize bind exec queue creation. Bind exec queue is tied to migration VM
 * for access to physical memory required for page table programming. On
 * faulting devices the reserved copy engine instance must be used to avoid
 * deadlocking (user binds cannot get stuck behind faults, as kernel binds
 * which resolve faults depend on user binds). On non-faulting devices any copy
 * engine can be used.
 *
 * Returns exec queue on success, ERR_PTR on failure
 */
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						u32 flags, u64 extensions)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_exec_queue *q;
	struct xe_vm *migrate_vm;

	migrate_vm = xe_migrate_get_vm(tile->migrate);
	if (xe->info.has_usm) {
		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
							   XE_ENGINE_CLASS_COPY,
							   gt->usm.reserved_bcs_instance,
							   false);

		if (!hwe) {
			xe_vm_put(migrate_vm);
			return ERR_PTR(-EINVAL);
		}

		q = xe_exec_queue_create(xe, migrate_vm,
					 BIT(hwe->logical_instance), 1, hwe,
					 flags, extensions);
	} else {
		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
					       XE_ENGINE_CLASS_COPY, flags,
					       extensions);
	}
	xe_vm_put(migrate_vm);

	return q;
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);

void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->destroy(q);
}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	/*
	 * Before releasing our ref to lrc and xef, accumulate our run ticks
	 * and wake up any waiters.
	 */
	xe_exec_queue_update_run_ticks(q);
	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
		wake_up_var(&q->xef->exec_queue.pending_removal);

	__xe_exec_queue_fini(q);
	__xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}
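
/**
 * xe_exec_queue_device_get_max_priority() - Maximum priority for the caller
 * @xe: Xe device.
 *
 * Return: XE_EXEC_QUEUE_PRIORITY_HIGH if the calling process has CAP_SYS_NICE,
 * XE_EXEC_QUEUE_PRIORITY_NORMAL otherwise.
 */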
enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

static int
exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
{
	if (value == DRM_XE_PXP_TYPE_NONE)
		return 0;

	/* we only support HWDRM sessions right now */
	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
		return -EINVAL;

	if (!xe_pxp_is_enabled(xe->pxp))
		return -ENODEV;

	return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
};
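
/*
 * Illustrative userspace view of the set_property extension (sketch only; see
 * uapi/drm/xe_drm.h for the authoritative layout): extensions are chained
 * through drm_xe_user_extension.next_extension and consumed one at a time
 * below, e.g. to request a timeslice at queue creation:
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE,
 *		.value = 1000,
 *	};
 *
 *	create.extensions = (uintptr_t)&ext;
 */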
static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}

static u32 calc_validate_logical_mask(struct xe_device *xe,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = xe_hw_engine_lookup(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}
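
/*
 * The create ioctl takes a width x num_placements array of
 * drm_xe_engine_class_instance entries, laid out so that entry
 * n = j * width + i is submission slot i of placement j (i.e. each
 * placement's width engines are contiguous). All entries must share the same
 * GT and engine class; see calc_validate_logical_mask() above.
 */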
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm;
	struct xe_tile *tile;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 flags = 0;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = copy_from_user(eci, user_eci,
			     sizeof(struct drm_xe_engine_class_instance) * len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id)))
		return -EINVAL;

	if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
		flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		if (XE_IOCTL_DBG(xe, args->width != 1) ||
		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
			return -EINVAL;

		for_each_tile(tile, xe, id) {
			struct xe_exec_queue *new;

			flags |= EXEC_QUEUE_FLAG_VM;
			if (id)
				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;

			new = xe_exec_queue_create_bind(xe, tile, flags,
							args->extensions);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		logical_mask = calc_validate_logical_mask(xe, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = xe_hw_engine_lookup(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, flags,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}

		if (q->vm && q->hwe->hw_engine_group) {
			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
			if (err)
				goto put_exec_queue;
		}
	}

	q->xef = xe_file_get(xef);

	/* user id alloc must always be last in ioctl to prevent UAF */
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}
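
/*
 * Illustrative only (not from the original source): userspace can check
 * whether a queue has been banned after a hang/reset with something like:
 *
 *	struct drm_xe_exec_queue_get_property get = {
 *		.exec_queue_id = id,
 *		.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &get);
 *
 * where a non-zero get.value means the queue has been banned.
 */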
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}
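
/*
 * Jobs still in flight on @q: the gap between the last seqno handed out by the
 * fence context (next_seqno - 1) and the last seqno the HW has completed on
 * the first LRC.
 */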
static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc[0];
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec queue
 * from hw
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save run ticks
 * calculated by using the delta from last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	struct xe_lrc *lrc;
	u64 old_ts, new_ts;
	int idx;

	/*
	 * Jobs that are executed by the kernel don't have a corresponding
	 * xe_file and thus are not accounted.
	 */
	if (!q->xef)
		return;

	/* Synchronize with unbind while holding the xe file open */
	if (!drm_dev_enter(&xe->drm, &idx))
		return;
	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate that below by multiplying by
	 * width - this may introduce errors if that premise is not true and
	 * they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them at different times could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;

	drm_dev_exit(idx);
}

/**
 * xe_exec_queue_kill - permanently stop all execution from an exec queue
 * @q: The exec queue
 *
 * This function permanently stops all activity on an exec queue. If the queue
 * is actively executing on the HW, it will be kicked off the engine; any
 * pending jobs are discarded and all future submissions are rejected.
 * This function is safe to call multiple times.
 */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	if (q)
		atomic_inc(&xef->exec_queue.pending_removal);
	mutex_unlock(&xef->exec_queue.lock);

	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (q->vm && q->hwe->hw_engine_group)
		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM) {
		lockdep_assert_held(&vm->lock);
	} else {
		xe_vm_assert_held(vm);
		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
	}
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put_unlocked(q);
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}
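
/*
 * Typical last-fence usage (illustrative sketch, not from the original
 * source): a bind or exec path takes the current last fence as a dependency
 * for its new job and then publishes the new job's fence:
 *
 *	fence = xe_exec_queue_last_fence_get(q, vm);
 *	... install fence as a dependency of the new job ...
 *	dma_fence_put(fence);
 *	...
 *	xe_exec_queue_last_fence_set(q, vm, new_job_fence);
 */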
/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref. Only safe to be called in the context of
 * resuming the hw engine group's long-running exec queue, when the group
 * semaphore is held.
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
							   struct xe_vm *vm)
{
	struct dma_fence *fence;

	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put_unlocked(q);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Takes a reference on @fence; when the
 * engine is closed, xe_exec_queue_last_fence_put() should be called.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}

/**
 * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Returns:
 * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
 */
int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence *fence;
	int err = 0;

	fence = xe_exec_queue_last_fence_get(q, vm);
	if (fence) {
		err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
			0 : -ETIME;
		dma_fence_put(fence);
	}

	return err;
}