// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_query.h"

#include <linux/nospec.h>
#include <linux/sched/clock.h>

#include <drm/ttm/ttm_placement.h>
#include <generated/xe_wa_oob.h>
#include <uapi/drm/xe_drm.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
#include "xe_pxp.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vram_types.h"
#include "xe_wa.h"

static const u16 xe_to_user_engine_class[] = {
	[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
	[XE_ENGINE_CLASS_COPY] = DRM_XE_ENGINE_CLASS_COPY,
	[XE_ENGINE_CLASS_VIDEO_DECODE] = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
	[XE_ENGINE_CLASS_VIDEO_ENHANCE] = DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[XE_ENGINE_CLASS_COMPUTE] = DRM_XE_ENGINE_CLASS_COMPUTE,
};

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

static size_t calc_hw_engine_info_size(struct xe_device *xe)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;
			i++;
		}

	return sizeof(struct drm_xe_query_engines) +
		i * sizeof(struct drm_xe_engine);
}
typedef u64 (*__ktime_func_t)(void);
static __ktime_func_t __clock_id_to_func(clockid_t clk_id)
{
	/*
	 * Use logic same as the perf subsystem to allow user to select the
	 * reference clock id to be used for timestamps.
	 */
	switch (clk_id) {
	case CLOCK_MONOTONIC:
		return &ktime_get_ns;
	case CLOCK_MONOTONIC_RAW:
		return &ktime_get_raw_ns;
	case CLOCK_REALTIME:
		return &ktime_get_real_ns;
	case CLOCK_BOOTTIME:
		return &ktime_get_boottime_ns;
	case CLOCK_TAI:
		return &ktime_get_clocktai_ns;
	default:
		return NULL;
	}
}

static void
hwe_read_timestamp(struct xe_hw_engine *hwe, u64 *engine_ts, u64 *cpu_ts,
		   u64 *cpu_delta, __ktime_func_t cpu_clock)
{
	struct xe_mmio *mmio = &hwe->gt->mmio;
	u32 upper, lower, old_upper, loop = 0;
	struct xe_reg upper_reg = RING_TIMESTAMP_UDW(hwe->mmio_base),
		      lower_reg = RING_TIMESTAMP(hwe->mmio_base);

	upper = xe_mmio_read32(mmio, upper_reg);
	do {
		*cpu_delta = local_clock();
		*cpu_ts = cpu_clock();
		lower = xe_mmio_read32(mmio, lower_reg);
		*cpu_delta = local_clock() - *cpu_delta;
		old_upper = upper;
		upper = xe_mmio_read32(mmio, upper_reg);
	} while (upper != old_upper && loop++ < 2);

	*engine_ts = (u64)upper << 32 | lower;
}

static int
query_engine_cycles(struct xe_device *xe,
		    struct drm_xe_device_query *query)
{
	struct drm_xe_query_engine_cycles __user *query_ptr;
	struct drm_xe_engine_class_instance *eci;
	struct drm_xe_query_engine_cycles resp;
	size_t size = sizeof(resp);
	__ktime_func_t cpu_clock;
	struct xe_hw_engine *hwe;
	struct xe_gt *gt;

	if (IS_SRIOV_VF(xe))
		return -EOPNOTSUPP;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	query_ptr = u64_to_user_ptr(query->data);
	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	cpu_clock = __clock_id_to_func(resp.clockid);
	if (!cpu_clock)
		return -EINVAL;

	eci = &resp.eci;
	gt = xe_device_get_gt(xe, eci->gt_id);
	if (!gt)
		return -EINVAL;

	if (eci->engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return -EINVAL;

	hwe = xe_gt_hw_engine(gt, user_to_xe_engine_class[eci->engine_class],
			      eci->engine_instance, true);
	if (!hwe)
		return -EINVAL;

	xe_with_force_wake(fw_ref, gt_to_fw(gt), XE_FORCEWAKE_ALL) {
		if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL))
			return -EIO;

		hwe_read_timestamp(hwe, &resp.engine_cycles, &resp.cpu_timestamp,
				   &resp.cpu_delta, cpu_clock);
	}

	if (GRAPHICS_VER(xe) >= 20)
		resp.width = 64;
	else
		resp.width = 36;

	/* Only write to the output fields of user query */
	if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp) ||
	    put_user(resp.cpu_delta, &query_ptr->cpu_delta) ||
	    put_user(resp.engine_cycles, &query_ptr->engine_cycles) ||
	    put_user(resp.width, &query_ptr->width))
		return -EFAULT;

	return 0;
}
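/*
 * Userspace usage sketch (illustrative only, not part of the driver; helper
 * and variable names are hypothetical): DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
 * expects the caller to pre-fill the target engine and reference clock in the
 * buffer pointed to by query.data, since query_engine_cycles() reads those
 * fields back with copy_from_user() and only writes the output fields:
 *
 *	struct drm_xe_query_engine_cycles cycles = {
 *		.eci = {
 *			.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *			.gt_id = 0,
 *		},
 *		.clockid = CLOCK_MONOTONIC,
 *	};
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_ENGINE_CYCLES,
 *		.size = sizeof(cycles),
 *		.data = (uintptr_t)&cycles,
 *	};
 *
 *	if (!ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
 *		correlate_timestamps(cycles.cpu_timestamp, cycles.engine_cycles,
 *				     cycles.width, cycles.cpu_delta);
 */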
static int query_engines(struct xe_device *xe,
			 struct drm_xe_device_query *query)
{
	size_t size = calc_hw_engine_info_size(xe);
	struct drm_xe_query_engines __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_engines *engines;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	engines = kzalloc(size, GFP_KERNEL);
	if (!engines)
		return -ENOMEM;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;

			engines->engines[i].instance.engine_class =
				xe_to_user_engine_class[hwe->class];
			engines->engines[i].instance.engine_instance =
				hwe->logical_instance;
			engines->engines[i].instance.gt_id = gt->info.id;

			i++;
		}

	engines->num_engines = i;

	if (copy_to_user(query_ptr, engines, size)) {
		kfree(engines);
		return -EFAULT;
	}
	kfree(engines);

	return 0;
}

static size_t calc_mem_regions_size(struct xe_device *xe)
{
	u32 num_managers = 1;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i)
		if (ttm_manager_type(&xe->ttm, i))
			num_managers++;

	return offsetof(struct drm_xe_query_mem_regions, mem_regions[num_managers]);
}

static int query_mem_regions(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	size_t size = calc_mem_regions_size(xe);
	struct drm_xe_query_mem_regions *mem_regions;
	struct drm_xe_query_mem_regions __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct ttm_resource_manager *man;
	int ret, i;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	mem_regions = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_DBG(xe, !mem_regions))
		return -ENOMEM;

	man = ttm_manager_type(&xe->ttm, XE_PL_TT);
	mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
	/*
	 * The instance needs to be a unique number that represents the index
	 * in the placement mask used at xe_gem_create_ioctl() for the
	 * xe_bo_create() placement.
	 */
	mem_regions->mem_regions[0].instance = 0;
	mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
	mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
	mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
	mem_regions->num_mem_regions = 1;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			mem_regions->mem_regions[mem_regions->num_mem_regions].mem_class =
				DRM_XE_MEM_REGION_CLASS_VRAM;
			mem_regions->mem_regions[mem_regions->num_mem_regions].instance =
				mem_regions->num_mem_regions;
			mem_regions->mem_regions[mem_regions->num_mem_regions].min_page_size =
				xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
				SZ_64K : PAGE_SIZE;
			mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
				man->size;

			xe_ttm_vram_get_used(man,
					     &mem_regions->mem_regions
					     [mem_regions->num_mem_regions].used,
					     &mem_regions->mem_regions
					     [mem_regions->num_mem_regions].cpu_visible_used);

			mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
				xe_ttm_vram_get_cpu_visible_size(man);
			mem_regions->num_mem_regions++;
		}
	}

	if (!copy_to_user(query_ptr, mem_regions, size))
		ret = 0;
	else
		ret = -ENOSPC;

	kfree(mem_regions);
	return ret;
}
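/*
 * Userspace usage sketch (illustrative only, not part of the driver): the
 * @instance reported for each region is the bit position userspace is
 * expected to set in drm_xe_gem_create.placement, which is why the comment
 * above insists it matches the placement mask used at xe_gem_create_ioctl().
 * Index [1] and the bo_size variable below are illustrative:
 *
 *	struct drm_xe_query_mem_regions *regions = ...;	// from this query
 *	struct drm_xe_gem_create create = {
 *		.size = bo_size,
 *		.placement = 1 << regions->mem_regions[1].instance,
 *		.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
 */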
static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
{
	const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
	size_t size =
		sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
	struct drm_xe_query_config __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_config *config;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	config->num_params = num_params;
	config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
		xe->info.devid | (xe->info.revid << 16);
	if (xe->mem.vram)
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
	if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
	if (GRAPHICS_VER(xe) >= 20)
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT;
	config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
		DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY;
	config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
	config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
	config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
		xe_exec_queue_device_get_max_priority(xe);

	if (copy_to_user(query_ptr, config, size)) {
		kfree(config);
		return -EFAULT;
	}
	kfree(config);

	return 0;
}
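/*
 * Userspace usage sketch (illustrative only): like every query in this file,
 * DRM_XE_DEVICE_QUERY_CONFIG follows the two-call pattern implemented by the
 * "query->size == 0" branch above - first call with size 0 to learn the
 * required buffer size, then call again with an exactly-sized buffer:
 *
 *	struct drm_xe_device_query query = {
 *		.query = DRM_XE_DEVICE_QUERY_CONFIG,
 *	};
 *	struct drm_xe_query_config *config;
 *
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	config = malloc(query.size);
 *	query.data = (uintptr_t)config;
 *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *	// config->info[DRM_XE_QUERY_CONFIG_VA_BITS], etc. are now valid
 */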
static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct xe_gt *gt;
	size_t size = sizeof(struct drm_xe_query_gt_list) +
		xe->info.gt_count * sizeof(struct drm_xe_gt);
	struct drm_xe_query_gt_list __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_gt_list *gt_list;
	int iter = 0;
	u8 id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	gt_list = kzalloc(size, GFP_KERNEL);
	if (!gt_list)
		return -ENOMEM;

	gt_list->num_gt = xe->info.gt_count;

	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
		else
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
		gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
		gt_list->gt_list[iter].gt_id = gt->info.id;
		gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
		/*
		 * The mem_regions indexes in the mask below need to
		 * directly identify the struct
		 * drm_xe_query_mem_regions' instance constructed at
		 * query_mem_regions()
		 *
		 * For our current platforms:
		 * Bit 0 -> System Memory
		 * Bit 1 -> VRAM0 on Tile0
		 * Bit 2 -> VRAM1 on Tile1
		 * However the uAPI is generic and it's userspace's
		 * responsibility to check the mem_class, without any
		 * assumption.
		 */
		if (!IS_DGFX(xe))
			gt_list->gt_list[iter].near_mem_regions = 0x1;
		else
			gt_list->gt_list[iter].near_mem_regions =
				BIT(gt_to_tile(gt)->mem.vram->id) << 1;
		gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
			gt_list->gt_list[iter].near_mem_regions;

		gt_list->gt_list[iter].ip_ver_major =
			REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_minor =
			REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_rev =
			REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);

		iter++;
	}

	if (copy_to_user(query_ptr, gt_list, size)) {
		kfree(gt_list);
		return -EFAULT;
	}
	kfree(gt_list);

	return 0;
}

static int query_hwconfig(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	size_t size = gt ? xe_guc_hwconfig_size(&gt->uc.guc) : 0;
	void __user *query_ptr = u64_to_user_ptr(query->data);
	void *hwconfig;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	hwconfig = kzalloc(size, GFP_KERNEL);
	if (!hwconfig)
		return -ENOMEM;

	xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);

	if (copy_to_user(query_ptr, hwconfig, size)) {
		kfree(hwconfig);
		return -EFAULT;
	}
	kfree(hwconfig);

	return 0;
}
static size_t calc_topo_query_size(struct xe_device *xe)
{
	struct xe_gt *gt;
	size_t query_size = 0;
	int id;

	for_each_gt(gt, xe, id) {
		query_size += 3 * sizeof(struct drm_xe_query_topology_mask) +
			sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);

		/* L3bank mask may not be available for some GTs */
		if (xe_gt_topology_report_l3(gt))
			query_size += sizeof(struct drm_xe_query_topology_mask) +
				sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
	}

	return query_size;
}

static int copy_mask(void __user **ptr,
		     struct drm_xe_query_topology_mask *topo,
		     void *mask, size_t mask_size)
{
	topo->num_bytes = mask_size;

	if (copy_to_user(*ptr, topo, sizeof(*topo)))
		return -EFAULT;
	*ptr += sizeof(*topo);

	if (copy_to_user(*ptr, mask, mask_size))
		return -EFAULT;
	*ptr += mask_size;

	return 0;
}

static int query_gt_topology(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_topo_query_size(xe);
	struct drm_xe_query_topology_mask topo;
	struct xe_gt *gt;
	int id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	for_each_gt(gt, xe, id) {
		int err;

		topo.gt_id = id;

		topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
				sizeof(gt->fuse_topo.g_dss_mask));
		if (err)
			return err;

		topo.type = DRM_XE_TOPO_DSS_COMPUTE;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
				sizeof(gt->fuse_topo.c_dss_mask));
		if (err)
			return err;

		/*
		 * If the kernel doesn't have a way to obtain a correct L3bank
		 * mask, then it's better to omit L3 from the query rather than
		 * reporting bogus or zeroed information to userspace.
		 */
		if (xe_gt_topology_report_l3(gt)) {
			topo.type = DRM_XE_TOPO_L3_BANK;
			err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
					sizeof(gt->fuse_topo.l3_bank_mask));
			if (err)
				return err;
		}

		topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
			DRM_XE_TOPO_SIMD16_EU_PER_DSS :
			DRM_XE_TOPO_EU_PER_DSS;
		err = copy_mask(&query_ptr, &topo,
				gt->fuse_topo.eu_mask_per_dss,
				sizeof(gt->fuse_topo.eu_mask_per_dss));
		if (err)
			return err;
	}

	return 0;
}
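/*
 * Userspace usage sketch (illustrative only; handle_dss_mask() is a
 * hypothetical callback): query_gt_topology() returns a stream of
 * variable-length records, each a struct drm_xe_query_topology_mask header
 * followed by @num_bytes of mask data, so the reply has to be walked record
 * by record rather than indexed as an array:
 *
 *	for (__u8 *p = buf; p < buf + query.size; ) {
 *		struct drm_xe_query_topology_mask *topo = (void *)p;
 *
 *		if (topo->type == DRM_XE_TOPO_DSS_GEOMETRY)
 *			handle_dss_mask(topo->gt_id, topo->mask,
 *					topo->num_bytes);
 *
 *		p += sizeof(*topo) + topo->num_bytes;
 *	}
 */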
static int
query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_uc_fw_version __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_uc_fw_version);
	struct drm_xe_query_uc_fw_version resp;
	struct xe_uc_fw_version *version = NULL;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved))
		return -EINVAL;

	switch (resp.uc_type) {
	case XE_QUERY_UC_TYPE_GUC_SUBMISSION: {
		struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc;

		version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
		break;
	}
	case XE_QUERY_UC_TYPE_HUC: {
		struct xe_gt *media_gt = NULL;
		struct xe_huc *huc;

		if (MEDIA_VER(xe) >= 13) {
			struct xe_tile *tile;
			u8 gt_id;

			for_each_tile(tile, xe, gt_id) {
				if (tile->media_gt) {
					media_gt = tile->media_gt;
					break;
				}
			}
		} else {
			media_gt = xe->tiles[0].primary_gt;
		}

		if (!media_gt)
			break;

		huc = &media_gt->uc.huc;
		if (huc->fw.status == XE_UC_FIRMWARE_RUNNING)
			version = &huc->fw.versions.found[XE_UC_FW_VER_RELEASE];
		break;
	}
	default:
		return -EINVAL;
	}

	if (version) {
		resp.branch_ver = 0;
		resp.major_ver = version->major;
		resp.minor_ver = version->minor;
		resp.patch_ver = version->patch;
	} else {
		return -ENODEV;
	}

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

static size_t calc_oa_unit_query_size(struct xe_device *xe)
{
	size_t size = sizeof(struct drm_xe_query_oa_units);
	struct xe_gt *gt;
	int i, id;

	for_each_gt(gt, xe, id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			size += sizeof(struct drm_xe_oa_unit);
			size += gt->oa.oa_unit[i].num_engines *
				sizeof(struct drm_xe_engine_class_instance);
		}
	}

	return size;
}

static int query_oa_units(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_oa_unit_query_size(xe);
	struct drm_xe_query_oa_units *qoa;
	enum xe_hw_engine_id hwe_id;
	struct drm_xe_oa_unit *du;
	struct xe_hw_engine *hwe;
	struct xe_oa_unit *u;
	int gt_id, i, j, ret;
	struct xe_gt *gt;
	u8 *pdu;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	qoa = kzalloc(size, GFP_KERNEL);
	if (!qoa)
		return -ENOMEM;

	pdu = (u8 *)&qoa->oa_units[0];
	for_each_gt(gt, xe, gt_id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			u = &gt->oa.oa_unit[i];
			du = (struct drm_xe_oa_unit *)pdu;

			du->oa_unit_id = u->oa_unit_id;
			du->oa_unit_type = u->type;
			du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
			du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
					   DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
					   DRM_XE_OA_CAPS_WAIT_NUM_REPORTS |
					   DRM_XE_OA_CAPS_OAM |
					   DRM_XE_OA_CAPS_OA_UNIT_GT_ID;
			du->gt_id = u->gt->info.id;
			j = 0;
			for_each_hw_engine(hwe, gt, hwe_id) {
				if (!xe_hw_engine_is_reserved(hwe) &&
				    xe_oa_unit_id(hwe) == u->oa_unit_id) {
					du->eci[j].engine_class =
						xe_to_user_engine_class[hwe->class];
					du->eci[j].engine_instance = hwe->logical_instance;
					du->eci[j].gt_id = gt->info.id;
					j++;
				}
			}
			du->num_engines = j;
			pdu += sizeof(*du) + j * sizeof(du->eci[0]);
			qoa->num_oa_units++;
		}
	}

	ret = copy_to_user(query_ptr, qoa, size);
	kfree(qoa);

	return ret ? -EFAULT : 0;
}
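/*
 * Userspace usage sketch (illustrative only; use_oa_unit() is a hypothetical
 * callback): each drm_xe_oa_unit in the reply is followed by its
 * variable-length eci[] array, mirroring how pdu is advanced above, so the
 * buffer has to be walked with the same stride:
 *
 *	struct drm_xe_query_oa_units *qoa = ...;	// from this query
 *	u8 *poau = (u8 *)&qoa->oa_units[0];
 *
 *	for (int i = 0; i < qoa->num_oa_units; i++) {
 *		struct drm_xe_oa_unit *oau = (struct drm_xe_oa_unit *)poau;
 *
 *		use_oa_unit(oau->oa_unit_id, oau->eci, oau->num_engines);
 *		poau += sizeof(*oau) + oau->num_engines * sizeof(oau->eci[0]);
 *	}
 */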
static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_pxp_status __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_pxp_status);
	struct drm_xe_query_pxp_status resp = { 0 };
	int ret;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	ret = xe_pxp_get_readiness_status(xe->pxp);
	if (ret < 0)
		return ret;

	resp.status = ret;
	resp.supported_session_types = BIT(DRM_XE_PXP_TYPE_HWDRM);

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

static int query_eu_stall(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	struct drm_xe_query_eu_stall *info;
	size_t size, array_size;
	const u64 *rates;
	u32 num_rates;
	int ret;

	if (!xe_eu_stall_supported_on_platform(xe))
		return -ENODEV;

	array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
	size = sizeof(struct drm_xe_query_eu_stall) + array_size;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	info = kzalloc(size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->num_sampling_rates = num_rates;
	info->capabilities = DRM_XE_EU_STALL_CAPS_BASE;
	info->record_size = xe_eu_stall_data_record_size(xe);
	info->per_xecore_buf_size = xe_eu_stall_get_per_xecore_buf_size();
	memcpy(info->sampling_rates, rates, array_size);

	ret = copy_to_user(query_ptr, info, size);
	kfree(info);

	return ret ? -EFAULT : 0;
}

static int (* const xe_query_funcs[])(struct xe_device *xe,
				      struct drm_xe_device_query *query) = {
	query_engines,
	query_mem_regions,
	query_config,
	query_gt_list,
	query_hwconfig,
	query_gt_topology,
	query_engine_cycles,
	query_uc_fw_version,
	query_oa_units,
	query_pxp_status,
	query_eu_stall,
};

int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_device_query *query = data;
	u32 idx;

	if (XE_IOCTL_DBG(xe, query->extensions) ||
	    XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
		return -EINVAL;

	idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
	if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
		return -EINVAL;

	return xe_query_funcs[idx](xe, query);
}