// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_query.h"

#include <linux/nospec.h>
#include <linux/sched/clock.h>

#include <drm/ttm/ttm_placement.h>
#include <generated/xe_wa_oob.h>
#include <uapi/drm/xe_drm.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
#include "xe_pxp.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vram_types.h"
#include "xe_wa.h"

static const u16 xe_to_user_engine_class[] = {
	[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
	[XE_ENGINE_CLASS_COPY] = DRM_XE_ENGINE_CLASS_COPY,
	[XE_ENGINE_CLASS_VIDEO_DECODE] = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
	[XE_ENGINE_CLASS_VIDEO_ENHANCE] = DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[XE_ENGINE_CLASS_COMPUTE] = DRM_XE_ENGINE_CLASS_COMPUTE,
};

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

static size_t calc_hw_engine_info_size(struct xe_device *xe)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;
			i++;
		}

	return sizeof(struct drm_xe_query_engines) +
		i * sizeof(struct drm_xe_engine);
}

typedef u64 (*__ktime_func_t)(void);
static __ktime_func_t __clock_id_to_func(clockid_t clk_id)
{
	/*
	 * Use logic same as the perf subsystem to allow user to select the
	 * reference clock id to be used for timestamps.
	 */
	switch (clk_id) {
	case CLOCK_MONOTONIC:
		return &ktime_get_ns;
	case CLOCK_MONOTONIC_RAW:
		return &ktime_get_raw_ns;
	case CLOCK_REALTIME:
		return &ktime_get_real_ns;
	case CLOCK_BOOTTIME:
		return &ktime_get_boottime_ns;
	case CLOCK_TAI:
		return &ktime_get_clocktai_ns;
	default:
		return NULL;
	}
}

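/*
 * Sample a hw engine's 64-bit RING_TIMESTAMP together with the CPU clock
 * selected by the user. The upper dword is read before and after the lower
 * dword, and the read is retried (at most twice) if the upper dword changed,
 * so a carry between the two halves cannot produce a torn 64-bit value. The
 * lower-dword read is bracketed with local_clock() so that cpu_delta reports
 * how wide the CPU-side sampling window was.
 */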
static void
hwe_read_timestamp(struct xe_hw_engine *hwe, u64 *engine_ts, u64 *cpu_ts,
		   u64 *cpu_delta, __ktime_func_t cpu_clock)
{
	struct xe_mmio *mmio = &hwe->gt->mmio;
	u32 upper, lower, old_upper, loop = 0;
	struct xe_reg upper_reg = RING_TIMESTAMP_UDW(hwe->mmio_base),
		      lower_reg = RING_TIMESTAMP(hwe->mmio_base);

	upper = xe_mmio_read32(mmio, upper_reg);
	do {
		*cpu_delta = local_clock();
		*cpu_ts = cpu_clock();
		lower = xe_mmio_read32(mmio, lower_reg);
		*cpu_delta = local_clock() - *cpu_delta;
		old_upper = upper;
		upper = xe_mmio_read32(mmio, upper_reg);
	} while (upper != old_upper && loop++ < 2);

	*engine_ts = (u64)upper << 32 | lower;
}

static int
query_engine_cycles(struct xe_device *xe,
		    struct drm_xe_device_query *query)
{
	struct drm_xe_query_engine_cycles __user *query_ptr;
	struct drm_xe_engine_class_instance *eci;
	struct drm_xe_query_engine_cycles resp;
	size_t size = sizeof(resp);
	__ktime_func_t cpu_clock;
	struct xe_hw_engine *hwe;
	struct xe_gt *gt;

	if (IS_SRIOV_VF(xe))
		return -EOPNOTSUPP;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	query_ptr = u64_to_user_ptr(query->data);
	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	cpu_clock = __clock_id_to_func(resp.clockid);
	if (!cpu_clock)
		return -EINVAL;

	eci = &resp.eci;
	if (eci->gt_id >= xe->info.max_gt_per_tile)
		return -EINVAL;

	gt = xe_device_get_gt(xe, eci->gt_id);
	if (!gt)
		return -EINVAL;

	if (eci->engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return -EINVAL;

	hwe = xe_gt_hw_engine(gt, user_to_xe_engine_class[eci->engine_class],
			      eci->engine_instance, true);
	if (!hwe)
		return -EINVAL;

	xe_with_force_wake(fw_ref, gt_to_fw(gt), XE_FORCEWAKE_ALL) {
		if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL))
			return -EIO;

		hwe_read_timestamp(hwe, &resp.engine_cycles, &resp.cpu_timestamp,
				   &resp.cpu_delta, cpu_clock);
	}

	if (GRAPHICS_VER(xe) >= 20)
		resp.width = 64;
	else
		resp.width = 36;

	/* Only write to the output fields of user query */
	if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp) ||
	    put_user(resp.cpu_delta, &query_ptr->cpu_delta) ||
	    put_user(resp.engine_cycles, &query_ptr->engine_cycles) ||
	    put_user(resp.width, &query_ptr->width))
		return -EFAULT;

	return 0;
}

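/*
 * Every query handler below follows the same two-call contract that
 * query_engine_cycles() implements above: a call with query->size == 0 only
 * reports the required size, and a second call must pass back exactly that
 * size for the data to be copied out. A rough userspace sketch (illustrative
 * only, error handling omitted, assuming the libdrm drmIoctl() wrapper
 * rather than anything provided by this file):
 *
 *	struct drm_xe_device_query q = { .query = DRM_XE_DEVICE_QUERY_ENGINES };
 *	void *buf;
 *
 *	drmIoctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);	// sizing call: fills q.size
 *	buf = calloc(1, q.size);
 *	q.data = (uintptr_t)buf;
 *	drmIoctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);	// data call: fills buf
 */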
static int query_engines(struct xe_device *xe,
			 struct drm_xe_device_query *query)
{
	size_t size = calc_hw_engine_info_size(xe);
	struct drm_xe_query_engines __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_engines *engines;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct xe_gt *gt;
	u8 gt_id;
	int i = 0;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	engines = kzalloc(size, GFP_KERNEL);
	if (!engines)
		return -ENOMEM;

	for_each_gt(gt, xe, gt_id)
		for_each_hw_engine(hwe, gt, id) {
			if (xe_hw_engine_is_reserved(hwe))
				continue;

			engines->engines[i].instance.engine_class =
				xe_to_user_engine_class[hwe->class];
			engines->engines[i].instance.engine_instance =
				hwe->logical_instance;
			engines->engines[i].instance.gt_id = gt->info.id;

			i++;
		}

	engines->num_engines = i;

	if (copy_to_user(query_ptr, engines, size)) {
		kfree(engines);
		return -EFAULT;
	}
	kfree(engines);

	return 0;
}

static size_t calc_mem_regions_size(struct xe_device *xe)
{
	u32 num_managers = 1;
	int i;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i)
		if (ttm_manager_type(&xe->ttm, i))
			num_managers++;

	return offsetof(struct drm_xe_query_mem_regions, mem_regions[num_managers]);
}

static int query_mem_regions(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	size_t size = calc_mem_regions_size(xe);
	struct drm_xe_query_mem_regions *mem_regions;
	struct drm_xe_query_mem_regions __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct ttm_resource_manager *man;
	int ret, i;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	mem_regions = kzalloc(size, GFP_KERNEL);
	if (XE_IOCTL_DBG(xe, !mem_regions))
		return -ENOMEM;

	man = ttm_manager_type(&xe->ttm, XE_PL_TT);
	mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
	/*
	 * The instance needs to be a unique number that represents the index
	 * in the placement mask used at xe_gem_create_ioctl() for the
	 * xe_bo_create() placement.
	 */
	mem_regions->mem_regions[0].instance = 0;
	mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
	mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
	mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
	mem_regions->num_mem_regions = 1;

	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
		man = ttm_manager_type(&xe->ttm, i);
		if (man) {
			mem_regions->mem_regions[mem_regions->num_mem_regions].mem_class =
				DRM_XE_MEM_REGION_CLASS_VRAM;
			mem_regions->mem_regions[mem_regions->num_mem_regions].instance =
				mem_regions->num_mem_regions;
			mem_regions->mem_regions[mem_regions->num_mem_regions].min_page_size =
				xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
				SZ_64K : PAGE_SIZE;
			mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
				man->size;

			xe_ttm_vram_get_used(man,
					     &mem_regions->mem_regions
					     [mem_regions->num_mem_regions].used,
					     &mem_regions->mem_regions
					     [mem_regions->num_mem_regions].cpu_visible_used);

			mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
				xe_ttm_vram_get_cpu_visible_size(man);
			mem_regions->num_mem_regions++;
		}
	}

	if (!copy_to_user(query_ptr, mem_regions, size))
		ret = 0;
	else
		ret = -ENOSPC;

	kfree(mem_regions);
	return ret;
}

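/*
 * The "instance" filled in above is what userspace feeds back as a bit
 * position in the placement mask of a GEM create call, matching the note in
 * query_mem_regions() about xe_gem_create_ioctl(). Illustrative sketch only
 * (a hypothetical "region" pointer into the copied-out array):
 *
 *	create.placement |= 1 << region->instance;
 */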
static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
{
	const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
	size_t size =
		sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
	struct drm_xe_query_config __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_config *config;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	config->num_params = num_params;
	config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
		xe->info.devid | (xe->info.revid << 16);
	if (xe->mem.vram)
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
	if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
	if (GRAPHICS_VER(xe) >= 20)
		config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT;
	config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
			DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY;
	config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
	config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
	config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
		xe_exec_queue_device_get_max_priority(xe);

	if (copy_to_user(query_ptr, config, size)) {
		kfree(config);
		return -EFAULT;
	}
	kfree(config);

	return 0;
}

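/*
 * DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID, as filled in query_config() above,
 * packs the PCI device id in the low 16 bits and the revision id above it.
 * Userspace splits it as (illustrative):
 *
 *	u16 devid = info & 0xffff;
 *	u16 revid = (info >> 16) & 0xffff;
 */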
static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct xe_gt *gt;
	size_t size = sizeof(struct drm_xe_query_gt_list) +
		xe->info.gt_count * sizeof(struct drm_xe_gt);
	struct drm_xe_query_gt_list __user *query_ptr =
		u64_to_user_ptr(query->data);
	struct drm_xe_query_gt_list *gt_list;
	int iter = 0;
	u8 id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	gt_list = kzalloc(size, GFP_KERNEL);
	if (!gt_list)
		return -ENOMEM;

	gt_list->num_gt = xe->info.gt_count;

	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
		else
			gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
		gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
		gt_list->gt_list[iter].gt_id = gt->info.id;
		gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
		/*
		 * The mem_regions indexes in the mask below need to
		 * directly identify the struct
		 * drm_xe_query_mem_regions' instance constructed at
		 * query_mem_regions()
		 *
		 * For our current platforms:
		 * Bit 0 -> System Memory
		 * Bit 1 -> VRAM0 on Tile0
		 * Bit 2 -> VRAM1 on Tile1
		 * However the uAPI is generic and it's userspace's
		 * responsibility to check the mem_class, without any
		 * assumption.
		 */
		if (!IS_DGFX(xe))
			gt_list->gt_list[iter].near_mem_regions = 0x1;
		else
			gt_list->gt_list[iter].near_mem_regions =
				BIT(gt_to_tile(gt)->mem.vram->id) << 1;
		gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
			gt_list->gt_list[iter].near_mem_regions;

		gt_list->gt_list[iter].ip_ver_major =
			REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_minor =
			REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid);
		gt_list->gt_list[iter].ip_ver_rev =
			REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid);

		iter++;
	}

	if (copy_to_user(query_ptr, gt_list, size)) {
		kfree(gt_list);
		return -EFAULT;
	}
	kfree(gt_list);

	return 0;
}

static int query_hwconfig(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	size_t size = gt ? xe_guc_hwconfig_size(&gt->uc.guc) : 0;
	void __user *query_ptr = u64_to_user_ptr(query->data);
	void *hwconfig;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	hwconfig = kzalloc(size, GFP_KERNEL);
	if (!hwconfig)
		return -ENOMEM;

	xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);

	if (copy_to_user(query_ptr, hwconfig, size)) {
		kfree(hwconfig);
		return -EFAULT;
	}
	kfree(hwconfig);

	return 0;
}

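/*
 * The topology query returns a packed stream rather than a fixed struct:
 * for each GT, a sequence of struct drm_xe_query_topology_mask headers, each
 * immediately followed by num_bytes of raw mask data (see copy_mask()
 * below). A consumer walks it by advancing sizeof(header) + header.num_bytes
 * per entry until the size reported by calc_topo_query_size() is consumed.
 */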
static size_t calc_topo_query_size(struct xe_device *xe)
{
	struct xe_gt *gt;
	size_t query_size = 0;
	int id;

	for_each_gt(gt, xe, id) {
		query_size += 3 * sizeof(struct drm_xe_query_topology_mask) +
			sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
			sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);

		/* L3bank mask may not be available for some GTs */
		if (xe_gt_topology_report_l3(gt))
			query_size += sizeof(struct drm_xe_query_topology_mask) +
				sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
	}

	return query_size;
}

static int copy_mask(void __user **ptr,
		     struct drm_xe_query_topology_mask *topo,
		     void *mask, size_t mask_size)
{
	topo->num_bytes = mask_size;

	if (copy_to_user(*ptr, topo, sizeof(*topo)))
		return -EFAULT;
	*ptr += sizeof(*topo);

	if (copy_to_user(*ptr, mask, mask_size))
		return -EFAULT;
	*ptr += mask_size;

	return 0;
}

static int query_gt_topology(struct xe_device *xe,
			     struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_topo_query_size(xe);
	struct drm_xe_query_topology_mask topo;
	struct xe_gt *gt;
	int id;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	for_each_gt(gt, xe, id) {
		int err;

		topo.gt_id = id;

		topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
				sizeof(gt->fuse_topo.g_dss_mask));
		if (err)
			return err;

		topo.type = DRM_XE_TOPO_DSS_COMPUTE;
		err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
				sizeof(gt->fuse_topo.c_dss_mask));
		if (err)
			return err;

		/*
		 * If the kernel doesn't have a way to obtain a correct L3bank
		 * mask, then it's better to omit L3 from the query rather than
		 * reporting bogus or zeroed information to userspace.
		 */
		if (xe_gt_topology_report_l3(gt)) {
			topo.type = DRM_XE_TOPO_L3_BANK;
			err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
					sizeof(gt->fuse_topo.l3_bank_mask));
			if (err)
				return err;
		}

		topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
			DRM_XE_TOPO_SIMD16_EU_PER_DSS :
			DRM_XE_TOPO_EU_PER_DSS;
		err = copy_mask(&query_ptr, &topo,
				gt->fuse_topo.eu_mask_per_dss,
				sizeof(gt->fuse_topo.eu_mask_per_dss));
		if (err)
			return err;
	}

	return 0;
}

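/*
 * For the firmware version query below, the GuC case reports the
 * compatibility version of the submission interface, while the HuC case
 * reports the release version of the loaded firmware and only once that
 * firmware is verified to be running.
 */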
static int
query_uc_fw_version(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_uc_fw_version __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_uc_fw_version);
	struct drm_xe_query_uc_fw_version resp;
	struct xe_uc_fw_version *version = NULL;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	if (copy_from_user(&resp, query_ptr, size))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, resp.pad || resp.pad2 || resp.reserved))
		return -EINVAL;

	switch (resp.uc_type) {
	case XE_QUERY_UC_TYPE_GUC_SUBMISSION: {
		struct xe_guc *guc = &xe->tiles[0].primary_gt->uc.guc;

		version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
		break;
	}
	case XE_QUERY_UC_TYPE_HUC: {
		struct xe_gt *media_gt = NULL;
		struct xe_huc *huc;

		if (MEDIA_VER(xe) >= 13) {
			struct xe_tile *tile;
			u8 gt_id;

			for_each_tile(tile, xe, gt_id) {
				if (tile->media_gt) {
					media_gt = tile->media_gt;
					break;
				}
			}
		} else {
			media_gt = xe->tiles[0].primary_gt;
		}

		if (!media_gt)
			break;

		huc = &media_gt->uc.huc;
		if (huc->fw.status == XE_UC_FIRMWARE_RUNNING)
			version = &huc->fw.versions.found[XE_UC_FW_VER_RELEASE];
		break;
	}
	default:
		return -EINVAL;
	}

	if (version) {
		resp.branch_ver = 0;
		resp.major_ver = version->major;
		resp.minor_ver = version->minor;
		resp.patch_ver = version->patch;
	} else {
		return -ENODEV;
	}

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

static size_t calc_oa_unit_query_size(struct xe_device *xe)
{
	size_t size = sizeof(struct drm_xe_query_oa_units);
	struct xe_gt *gt;
	int i, id;

	for_each_gt(gt, xe, id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			size += sizeof(struct drm_xe_oa_unit);
			size += gt->oa.oa_unit[i].num_engines *
				sizeof(struct drm_xe_engine_class_instance);
		}
	}

	return size;
}

static int query_oa_units(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = calc_oa_unit_query_size(xe);
	struct drm_xe_query_oa_units *qoa;
	enum xe_hw_engine_id hwe_id;
	struct drm_xe_oa_unit *du;
	struct xe_hw_engine *hwe;
	struct xe_oa_unit *u;
	int gt_id, i, j, ret;
	struct xe_gt *gt;
	u8 *pdu;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	qoa = kzalloc(size, GFP_KERNEL);
	if (!qoa)
		return -ENOMEM;

	pdu = (u8 *)&qoa->oa_units[0];
	for_each_gt(gt, xe, gt_id) {
		for (i = 0; i < gt->oa.num_oa_units; i++) {
			u = &gt->oa.oa_unit[i];
			du = (struct drm_xe_oa_unit *)pdu;

			du->oa_unit_id = u->oa_unit_id;
			du->oa_unit_type = u->type;
			du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
			du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
					   DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
					   DRM_XE_OA_CAPS_WAIT_NUM_REPORTS |
					   DRM_XE_OA_CAPS_OAM |
					   DRM_XE_OA_CAPS_OA_UNIT_GT_ID;
			du->gt_id = u->gt->info.id;
			j = 0;
			for_each_hw_engine(hwe, gt, hwe_id) {
				if (!xe_hw_engine_is_reserved(hwe) &&
				    xe_oa_unit_id(hwe) == u->oa_unit_id) {
					du->eci[j].engine_class =
						xe_to_user_engine_class[hwe->class];
					du->eci[j].engine_instance = hwe->logical_instance;
					du->eci[j].gt_id = gt->info.id;
					j++;
				}
			}
			du->num_engines = j;
			pdu += sizeof(*du) + j * sizeof(du->eci[0]);
			qoa->num_oa_units++;
		}
	}

	ret = copy_to_user(query_ptr, qoa, size);
	kfree(qoa);

	return ret ? -EFAULT : 0;
}

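/*
 * Like the topology query, query_oa_units() above returns a variable-length
 * layout: each struct drm_xe_oa_unit is followed directly by num_engines
 * struct drm_xe_engine_class_instance entries, so a consumer advances by
 * sizeof(*du) + du->num_engines * sizeof(du->eci[0]) to reach the next unit,
 * mirroring how pdu is advanced while the buffer is built here.
 */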
static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query)
{
	struct drm_xe_query_pxp_status __user *query_ptr = u64_to_user_ptr(query->data);
	size_t size = sizeof(struct drm_xe_query_pxp_status);
	struct drm_xe_query_pxp_status resp = { 0 };
	int ret;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	ret = xe_pxp_get_readiness_status(xe->pxp);
	if (ret < 0)
		return ret;

	resp.status = ret;
	resp.supported_session_types = BIT(DRM_XE_PXP_TYPE_HWDRM);

	if (copy_to_user(query_ptr, &resp, size))
		return -EFAULT;

	return 0;
}

static int query_eu_stall(struct xe_device *xe,
			  struct drm_xe_device_query *query)
{
	void __user *query_ptr = u64_to_user_ptr(query->data);
	struct drm_xe_query_eu_stall *info;
	size_t size, array_size;
	const u64 *rates;
	u32 num_rates;
	int ret;

	if (!xe_eu_stall_supported_on_platform(xe))
		return -ENODEV;

	array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
	size = sizeof(struct drm_xe_query_eu_stall) + array_size;

	if (query->size == 0) {
		query->size = size;
		return 0;
	} else if (XE_IOCTL_DBG(xe, query->size != size)) {
		return -EINVAL;
	}

	info = kzalloc(size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->num_sampling_rates = num_rates;
	info->capabilities = DRM_XE_EU_STALL_CAPS_BASE;
	info->record_size = xe_eu_stall_data_record_size(xe);
	info->per_xecore_buf_size = xe_eu_stall_get_per_xecore_buf_size();
	memcpy(info->sampling_rates, rates, array_size);

	ret = copy_to_user(query_ptr, info, size);
	kfree(info);

	return ret ? -EFAULT : 0;
}

static int (* const xe_query_funcs[])(struct xe_device *xe,
				      struct drm_xe_device_query *query) = {
	query_engines,
	query_mem_regions,
	query_config,
	query_gt_list,
	query_hwconfig,
	query_gt_topology,
	query_engine_cycles,
	query_uc_fw_version,
	query_oa_units,
	query_pxp_status,
	query_eu_stall,
};

int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_device_query *query = data;
	u32 idx;

	if (XE_IOCTL_DBG(xe, query->extensions) ||
	    XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
		return -EINVAL;

	idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
	if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
		return -EINVAL;

	return xe_query_funcs[idx](xe, query);
}