// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_query.h"

#include <linux/nospec.h>
#include <linux/sched/clock.h>

#include <drm/ttm/ttm_placement.h>
#include <generated/xe_wa_oob.h>
#include <uapi/drm/xe_drm.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
#include "xe_pxp.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vram_types.h"
#include "xe_wa.h"

static const u16 xe_to_user_engine_class[] = {
	[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
	[XE_ENGINE_CLASS_COPY] = DRM_XE_ENGINE_CLASS_COPY,
	[XE_ENGINE_CLASS_VIDEO_DECODE] = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
	[XE_ENGINE_CLASS_VIDEO_ENHANCE] = DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[XE_ENGINE_CLASS_COMPUTE] = DRM_XE_ENGINE_CLASS_COMPUTE,
};

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

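/*
 * Every DRM_XE_DEVICE_QUERY_* handler below follows the same two-pass
 * contract: a call with query->size == 0 only reports the size of the
 * result, and a second call with exactly that size fills the buffer.
 * Illustrative userspace usage (sketch only, error handling omitted):
 *
 *	struct drm_xe_device_query q = {
 *		.query = DRM_XE_DEVICE_QUERY_ENGINES,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);	// q.size is now set
 *	q.data = (uintptr_t)malloc(q.size);
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);	// data is filled in
 */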
static size_t calc_hw_engine_info_size(struct xe_device *xe)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;
			i++;
		}

	return sizeof(struct drm_xe_query_engines) +
		i * sizeof(struct drm_xe_engine);
}

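/*
 * Engine timestamp queries let userspace pick which CPU clock domain the
 * GPU timestamp is correlated against.  A clockid with no matching ktime
 * getter maps to NULL and makes the query fail with -EINVAL.
 */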
typedef u64 (*__ktime_func_t)(void);
static __ktime_func_t __clock_id_to_func(clockid_t clk_id)
{
	/*
	 * Use logic same as the perf subsystem to allow user to select the
	 * reference clock id to be used for timestamps.
	 */
	switch (clk_id) {
	case CLOCK_MONOTONIC:
		return &ktime_get_ns;
	case CLOCK_MONOTONIC_RAW:
		return &ktime_get_raw_ns;
	case CLOCK_REALTIME:
		return &ktime_get_real_ns;
	case CLOCK_BOOTTIME:
		return &ktime_get_boottime_ns;
	case CLOCK_TAI:
		return &ktime_get_clocktai_ns;
	default:
		return NULL;
	}
}

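/*
 * Sample the 64-bit engine timestamp from the upper/lower dword pair.  The
 * upper dword is re-read after the lower one and the loop retries if it
 * changed, so a rollover between the two reads cannot produce a torn value.
 * local_clock() brackets the CPU timestamp and the lower-dword read so that
 * *cpu_delta reports how long that sampling window took.
 */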
static void
hwe_read_timestamp(struct xe_hw_engine *hwe, u64 *engine_ts, u64 *cpu_ts,
		   u64 *cpu_delta, __ktime_func_t cpu_clock)
{
	struct xe_mmio *mmio = &hwe->gt->mmio;
	u32 upper, lower, old_upper, loop = 0;
	struct xe_reg upper_reg = RING_TIMESTAMP_UDW(hwe->mmio_base),
		      lower_reg = RING_TIMESTAMP(hwe->mmio_base);

	upper = xe_mmio_read32(mmio, upper_reg);
	do {
		*cpu_delta = local_clock();
		*cpu_ts = cpu_clock();
		lower = xe_mmio_read32(mmio, lower_reg);
		*cpu_delta = local_clock() - *cpu_delta;
		old_upper = upper;
		upper = xe_mmio_read32(mmio, upper_reg);
	} while (upper != old_upper && loop++ < 2);

	*engine_ts = (u64)upper << 32 | lower;
}

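/*
 * Report a GPU timestamp for the requested engine together with a CPU
 * timestamp taken in the same sampling window, so userspace can correlate
 * the two clock domains.  The GPU counter is 64 bits wide on graphics
 * version 20 and newer and 36 bits wide on earlier platforms; only the
 * output fields of the user struct are written back.
 */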
static int
query_engine_cycles(struct xe_device *xe,
		    struct drm_xe_device_query *query)
{
	struct drm_xe_query_engine_cycles __user *query_ptr;
	struct drm_xe_engine_class_instance *eci;
	struct drm_xe_query_engine_cycles resp;
	size_t size = sizeof(resp);
	__ktime_func_t cpu_clock;
	struct xe_hw_engine *hwe;
	struct xe_gt *gt;

	if (IS_SRIOV_VF(xe))
		return -EOPNOTSUPP;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	query_ptr = u64_to_user_ptr(query->data);
	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	cpu_clock = __clock_id_to_func(resp.clockid);
	if (!cpu_clock)
		return -EINVAL;

	eci = &resp.eci;
	gt = xe_device_get_gt(xe, eci->gt_id);
	if (!gt)
		return -EINVAL;

	if (eci->engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return -EINVAL;

	hwe = xe_gt_hw_engine(gt, user_to_xe_engine_class[eci->engine_class],
			      eci->engine_instance, true);
	if (!hwe)
		return -EINVAL;

	xe_with_force_wake(fw_ref, gt_to_fw(gt), XE_FORCEWAKE_ALL) {
		if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL))
			return -EIO;

		hwe_read_timestamp(hwe, &resp.engine_cycles, &resp.cpu_timestamp,
				   &resp.cpu_delta, cpu_clock);
	}

	if (GRAPHICS_VER(xe) >= 20)
		resp.width = 64;
	else
		resp.width = 36;

	/* Only write to the output fields of user query */
	if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp) ||
	    put_user(resp.cpu_delta, &query_ptr->cpu_delta) ||
	    put_user(resp.engine_cycles, &query_ptr->engine_cycles) ||
	    put_user(resp.width, &query_ptr->width))
		return -EFAULT;

	return 0;
}

static int query_engines(struct xe_device *xe,
			 struct drm_xe_device_query *query)
{
	size_t size = calc_hw_engine_info_size(xe);
	struct drm_xe_query_engines __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_engines *engines;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	engines = kzalloc(size, GFP_KERNEL);
	if (!engines)
		return -ENOMEM;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;

			engines->engines[i].instance.engine_class =
				xe_to_user_engine_class[hwe->class];
			engines->engines[i].instance.engine_instance =
				hwe->logical_instance;
			engines->engines[i].instance.gt_id = gt->info.id;

			i++;
		}

	engines->num_engines = i;

	if (copy_to_user(query_ptr, engines, size)) {
		kfree(engines);
		return -EFAULT;
	}
	kfree(engines);

	return 0;
}

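/*
 * The system memory (TT) manager always contributes one region; each VRAM
 * placement that has a TTM manager adds another, so the size reported here
 * must match what query_mem_regions() fills in below.
 */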
static size_t calc_mem_regions_size(struct xe_device *xe)
{
	u32 num_managers = 1;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i)
		if (ttm_manager_type(&xe->ttm, i))
			num_managers++;

	return offsetof(struct drm_xe_query_mem_regions, mem_regions[num_managers]);
}

static int query_mem_regions(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	size_t size = calc_mem_regions_size(xe);
	struct drm_xe_query_mem_regions *mem_regions;
	struct drm_xe_query_mem_regions __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct ttm_resource_manager *man;
	int ret, i;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	mem_regions = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_DBG(xe, !mem_regions))
		return -ENOMEM;

	man = ttm_manager_type(&xe->ttm, XE_PL_TT);
	mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
	/*
	 * The instance needs to be a unique number that represents the index
	 * in the placement mask used at xe_gem_create_ioctl() for the
	 * xe_bo_create() placement.
	 */
	mem_regions->mem_regions[0].instance = 0;
	mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
	mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
	mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
	mem_regions->num_mem_regions = 1;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			mem_regions->mem_regions[mem_regions->num_mem_regions].mem_class =
				DRM_XE_MEM_REGION_CLASS_VRAM;
			mem_regions->mem_regions[mem_regions->num_mem_regions].instance =
				mem_regions->num_mem_regions;
			mem_regions->mem_regions[mem_regions->num_mem_regions].min_page_size =
				xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
				SZ_64K : PAGE_SIZE;
			mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
				man->size;

			xe_ttm_vram_get_used(man,
					     &mem_regions->mem_regions
					     [mem_regions->num_mem_regions].used,
					     &mem_regions->mem_regions
					     [mem_regions->num_mem_regions].cpu_visible_used);

			mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
				xe_ttm_vram_get_cpu_visible_size(man);
			mem_regions->num_mem_regions++;
		}
	}

	if (!copy_to_user(query_ptr, mem_regions, size))
		ret = 0;
	else
		ret = -ENOSPC;

	kfree(mem_regions);
	return ret;
}

static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
{
	const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
	size_t size =
		sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
	struct drm_xe_query_config __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_config *config;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	config->num_params = num_params;
	config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
		xe->info.devid | (xe->info.revid << 16);
	if (xe->mem.vram)
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
	if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
	if (GRAPHICS_VER(xe) >= 20)
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT;
	config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
		DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY;
	config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
		DRM_XE_QUERY_CONFIG_FLAG_HAS_DISABLE_STATE_CACHE_PERF_FIX;
	config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
		DRM_XE_QUERY_CONFIG_FLAG_HAS_PURGING_SUPPORT;
	config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
	config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
	config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
		xe_exec_queue_device_get_max_priority(xe);

	if (copy_to_user(query_ptr, config, size)) {
		kfree(config);
		return -EFAULT;
	}
	kfree(config);

	return 0;
}

static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct xe_gt *gt;
	size_t size = sizeof(struct drm_xe_query_gt_list) +
		xe->info.gt_count * sizeof(struct drm_xe_gt);
	struct drm_xe_query_gt_list __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_gt_list *gt_list;
	int iter = 0;
	u8 id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	gt_list = kzalloc(size, GFP_KERNEL);
	if (!gt_list)
		return -ENOMEM;

	gt_list->num_gt = xe->info.gt_count;

	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
		else
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
		gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
		gt_list->gt_list[iter].gt_id = gt->info.id;
		gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
		/*
		 * The mem_regions indexes in the mask below need to
		 * directly identify the struct
		 * drm_xe_query_mem_regions' instance constructed at
		 * query_mem_regions()
		 *
		 * For our current platforms:
		 * Bit 0 -> System Memory
		 * Bit 1 -> VRAM0 on Tile0
		 * Bit 2 -> VRAM1 on Tile1
		 * However the uAPI is generic and it's userspace's
		 * responsibility to check the mem_class, without any
		 * assumption.
		 */
		if (!IS_DGFX(xe))
			gt_list->gt_list[iter].near_mem_regions = 0x1;
		else
			gt_list->gt_list[iter].near_mem_regions =
				BIT(gt_to_tile(gt)->mem.vram->id) << 1;
		gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
			gt_list->gt_list[iter].near_mem_regions;

		gt_list->gt_list[iter].ip_ver_major =
			REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_minor =
			REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_rev =
			REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);

		iter++;
	}

	if (copy_to_user(query_ptr, gt_list, size)) {
		kfree(gt_list);
		return -EFAULT;
	}
	kfree(gt_list);

	return 0;
}

static int query_hwconfig(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	size_t size = gt ? xe_guc_hwconfig_size(&gt->uc.guc) : 0;
	void __user *query_ptr = u64_to_user_ptr(query->data);
	void *hwconfig;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	hwconfig = kzalloc(size, GFP_KERNEL);
	if (!hwconfig)
		return -ENOMEM;

	xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);

	if (copy_to_user(query_ptr, hwconfig, size)) {
		kfree(hwconfig);
		return -EFAULT;
	}
	kfree(hwconfig);

	return 0;
}

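/*
 * Each GT always reports the geometry DSS, compute DSS and EU-per-DSS masks,
 * each prefixed by a struct drm_xe_query_topology_mask header.  The L3 bank
 * mask is appended only for GTs where a correct mask can be obtained, so the
 * size computed here has to match what query_gt_topology() emits.
 */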
static size_t calc_topo_query_size(struct xe_device *xe)
{
	struct xe_gt *gt;
	size_t query_size = 0;
	int id;

	for_each_gt(gt, xe, id) {
		query_size += 3 * sizeof(struct drm_xe_query_topology_mask) +
			sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);

		/* L3bank mask may not be available for some GTs */
		if (xe_gt_topology_report_l3(gt))
			query_size += sizeof(struct drm_xe_query_topology_mask) +
				sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
	}

	return query_size;
}

static int copy_mask(void __user **ptr,
		     struct drm_xe_query_topology_mask *topo,
		     void *mask, size_t mask_size)
{
	topo->num_bytes = mask_size;

	if (copy_to_user(*ptr, topo, sizeof(*topo)))
		return -EFAULT;
	*ptr += sizeof(*topo);

	if (copy_to_user(*ptr, mask, mask_size))
		return -EFAULT;
	*ptr += mask_size;

	return 0;
}

static int query_gt_topology(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_topo_query_size(xe);
	struct drm_xe_query_topology_mask topo;
	struct xe_gt *gt;
	int id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	for_each_gt(gt, xe, id) {
		int err;

		topo.gt_id = id;

		topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
				sizeof(gt->fuse_topo.g_dss_mask));
		if (err)
			return err;

		topo.type = DRM_XE_TOPO_DSS_COMPUTE;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
				sizeof(gt->fuse_topo.c_dss_mask));
		if (err)
			return err;

		/*
		 * If the kernel doesn't have a way to obtain a correct L3bank
		 * mask, then it's better to omit L3 from the query rather than
		 * reporting bogus or zeroed information to userspace.
		 */
		if (xe_gt_topology_report_l3(gt)) {
			topo.type = DRM_XE_TOPO_L3_BANK;
			err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
					sizeof(gt->fuse_topo.l3_bank_mask));
			if (err)
				return err;
		}

		topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
			DRM_XE_TOPO_SIMD16_EU_PER_DSS :
			DRM_XE_TOPO_EU_PER_DSS;
		err = copy_mask(&query_ptr, &topo,
				gt->fuse_topo.eu_mask_per_dss,
				sizeof(gt->fuse_topo.eu_mask_per_dss));
		if (err)
			return err;
	}

	return 0;
}

static int
query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_uc_fw_version __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_uc_fw_version);
	struct drm_xe_query_uc_fw_version resp;
	struct xe_uc_fw_version *version = NULL;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved))
		return -EINVAL;

	switch (resp.uc_type) {
	case XE_QUERY_UC_TYPE_GUC_SUBMISSION: {
		struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc;

		version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
		break;
	}
	case XE_QUERY_UC_TYPE_HUC: {
		struct xe_gt *media_gt = NULL;
		struct xe_huc *huc;

		if (MEDIA_VER(xe) >= 13) {
			struct xe_tile *tile;
			u8 gt_id;

			for_each_tile(tile, xe, gt_id) {
				if (tile->media_gt) {
					media_gt = tile->media_gt;
					break;
				}
			}
		} else {
			media_gt = xe->tiles[0].primary_gt;
		}

		if (!media_gt)
			break;

		huc = &media_gt->uc.huc;
		if (huc->fw.status == XE_UC_FIRMWARE_RUNNING)
			version = &huc->fw.versions.found[XE_UC_FW_VER_RELEASE];
		break;
	}
	default:
		return -EINVAL;
	}

	if (version) {
		resp.branch_ver = 0;
		resp.major_ver = version->major;
		resp.minor_ver = version->minor;
		resp.patch_ver = version->patch;
	} else {
		return -ENODEV;
	}

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

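/*
 * The OA query result is a flexible layout: each struct drm_xe_oa_unit is
 * immediately followed by the engine instances attached to that unit, so
 * the total size can only be determined by walking every unit on every GT.
 */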
static size_t calc_oa_unit_query_size(struct xe_device *xe)
{
	size_t size = sizeof(struct drm_xe_query_oa_units);
	struct xe_gt *gt;
	int i, id;

	for_each_gt(gt, xe, id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			size += sizeof(struct drm_xe_oa_unit);
			size += gt->oa.oa_unit[i].num_engines *
				sizeof(struct drm_xe_engine_class_instance);
		}
	}

	return size;
}

static int query_oa_units(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_oa_unit_query_size(xe);
	struct drm_xe_query_oa_units *qoa;
	enum xe_hw_engine_id hwe_id;
	struct drm_xe_oa_unit *du;
	struct xe_hw_engine *hwe;
	struct xe_oa_unit *u;
	int gt_id, i, j, ret;
	struct xe_gt *gt;
	u8 *pdu;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	qoa = kzalloc(size, GFP_KERNEL);
	if (!qoa)
		return -ENOMEM;

	pdu = (u8 *)&qoa->oa_units[0];
	for_each_gt(gt, xe, gt_id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			u = &gt->oa.oa_unit[i];
			du = (struct drm_xe_oa_unit *)pdu;

			du->oa_unit_id = u->oa_unit_id;
			du->oa_unit_type = u->type;
			du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
			du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
					   DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
					   DRM_XE_OA_CAPS_WAIT_NUM_REPORTS |
					   DRM_XE_OA_CAPS_OAM |
					   DRM_XE_OA_CAPS_OA_UNIT_GT_ID;
			du->gt_id = u->gt->info.id;
			j = 0;
			for_each_hw_engine(hwe, gt, hwe_id) {
				if (!xe_hw_engine_is_reserved(hwe) &&
				    xe_oa_unit_id(hwe) == u->oa_unit_id) {
					du->eci[j].engine_class =
						xe_to_user_engine_class[hwe->class];
					du->eci[j].engine_instance = hwe->logical_instance;
					du->eci[j].gt_id = gt->info.id;
					j++;
				}
			}
			du->num_engines = j;
			pdu += sizeof(*du) + j * sizeof(du->eci[0]);
			qoa->num_oa_units++;
		}
	}

	ret = copy_to_user(query_ptr, qoa, size);
	kfree(qoa);

	return ret ? -EFAULT : 0;
}

static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_pxp_status __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_pxp_status);
	struct drm_xe_query_pxp_status resp = { 0 };
	int ret;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	ret = xe_pxp_get_readiness_status(xe->pxp);
	if (ret < 0)
		return ret;

	resp.status = ret;
	resp.supported_session_types = BIT(DRM_XE_PXP_TYPE_HWDRM);

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

static int query_eu_stall(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	struct drm_xe_query_eu_stall *info;
	size_t size, array_size;
	const u64 *rates;
	u32 num_rates;
	int ret;

	if (!xe_eu_stall_supported_on_platform(xe))
		return -ENODEV;

	array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
	size = sizeof(struct drm_xe_query_eu_stall) + array_size;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	info = kzalloc(size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->num_sampling_rates = num_rates;
	info->capabilities = DRM_XE_EU_STALL_CAPS_BASE;
	info->record_size = xe_eu_stall_data_record_size(xe);
	info->per_xecore_buf_size = xe_eu_stall_get_per_xecore_buf_size();
	memcpy(info->sampling_rates, rates, array_size);

	ret = copy_to_user(query_ptr, info, size);
	kfree(info);

	return ret ? -EFAULT : 0;
}

static int (* const xe_query_funcs[])(struct xe_device *xe,
				      struct drm_xe_device_query *query) = {
	query_engines,
	query_mem_regions,
	query_config,
	query_gt_list,
	query_hwconfig,
	query_gt_topology,
	query_engine_cycles,
	query_uc_fw_version,
	query_oa_units,
	query_pxp_status,
	query_eu_stall,
};

int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_device_query *query = data;
	u32 idx;

	if (XE_IOCTL_DBG(xe, query->extensions) ||
	    XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
		return -EINVAL;

	idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
	if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
		return -EINVAL;

	return xe_query_funcs[idx](xe, query);
}