/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/nospec.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "gt/intel_engine_user.h"
#include <uapi/drm/i915_drm.h>

/*
 * Common helper for the two-phase query protocol: a zero-length item is a
 * size probe and returns the total length the caller needs; otherwise the
 * user buffer must be at least total_length bytes, and the item header is
 * pulled in for the caller to inspect.
 */
static int copy_query_item(void *query_hdr, size_t query_sz,
			   u32 total_length,
			   struct drm_i915_query_item *query_item)
{
	if (query_item->length == 0)
		return total_length;

	if (query_item->length < total_length)
		return -EINVAL;

	if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
			   query_sz))
		return -EFAULT;

	return 0;
}

static int fill_topology_info(const struct sseu_dev_info *sseu,
			      struct drm_i915_query_item *query_item,
			      intel_sseu_ss_mask_t subslice_mask)
{
	struct drm_i915_query_topology_info topo;
	u32 slice_length, subslice_length, eu_length, total_length;
	int ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
	int eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
	int ret;

	BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));

	if (sseu->max_slices == 0)
		return -ENODEV;

	slice_length = sizeof(sseu->slice_mask);
	subslice_length = sseu->max_slices * ss_stride;
	eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
	total_length = sizeof(topo) + slice_length + subslice_length +
		       eu_length;

	ret = copy_query_item(&topo, sizeof(topo), total_length, query_item);

	if (ret != 0)
		return ret;

	memset(&topo, 0, sizeof(topo));
	topo.max_slices = sseu->max_slices;
	topo.max_subslices = sseu->max_subslices;
	topo.max_eus_per_subslice = sseu->max_eus_per_subslice;

	topo.subslice_offset = slice_length;
	topo.subslice_stride = ss_stride;
	topo.eu_offset = slice_length + subslice_length;
	topo.eu_stride = eu_stride;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 &topo, sizeof(topo)))
		return -EFAULT;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
			 &sseu->slice_mask, slice_length))
		return -EFAULT;

	if (intel_sseu_copy_ssmask_to_user(u64_to_user_ptr(query_item->data_ptr +
							   sizeof(topo) +
							   slice_length),
					   sseu))
		return -EFAULT;

	if (intel_sseu_copy_eumask_to_user(u64_to_user_ptr(query_item->data_ptr +
							   sizeof(topo) +
							   slice_length +
							   subslice_length),
					   sseu))
		return -EFAULT;

	return total_length;
}

static int query_topology_info(struct drm_i915_private *dev_priv,
			       struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;

	if (query_item->flags != 0)
		return -EINVAL;

	return fill_topology_info(sseu, query_item, sseu->subslice_mask);
}

static int query_geometry_subslices(struct drm_i915_private *i915,
				    struct drm_i915_query_item *query_item)
{
	const struct sseu_dev_info *sseu;
	struct intel_engine_cs *engine;
	struct i915_engine_class_instance classinstance;

	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
		return -ENODEV;

	classinstance = *((struct i915_engine_class_instance *)&query_item->flags);

	engine = intel_engine_lookup_user(i915, (u8)classinstance.engine_class,
					  (u8)classinstance.engine_instance);

	if (!engine)
		return -EINVAL;

	if (engine->class != RENDER_CLASS)
		return -EINVAL;

	sseu = &engine->gt->info.sseu;

	return fill_topology_info(sseu, query_item, sseu->geometry_subslice_mask);
}
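
/*
 * Userspace usage sketch for the topology query above (illustrative only,
 * not built as part of this file; assumes libdrm's drmIoctl() and an open
 * DRM fd). The query uAPI is two-phase: a zero-length item asks the kernel
 * for the required buffer size, and a second call with data_ptr/length set
 * fills in the header and the slice/subslice/EU masks that follow it.
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q);	// item.length = needed size
 *	struct drm_i915_query_topology_info *topo = calloc(1, item.length);
 *	item.data_ptr = (uintptr_t)topo;
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q);	// fills header + masks
 *
 *	// Subslice ss of slice s is present iff this bit is set:
 *	// topo->data[topo->subslice_offset +
 *	//            s * topo->subslice_stride + ss / 8] & (1 << (ss % 8))
 */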
static int
query_engine_info(struct drm_i915_private *i915,
		  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_engine_info __user *query_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_engine_info __user *info_ptr;
	struct drm_i915_query_engine_info query;
	struct drm_i915_engine_info info = { };
	unsigned int num_uabi_engines = 0;
	struct intel_engine_cs *engine;
	int len, ret;

	if (query_item->flags)
		return -EINVAL;

	for_each_uabi_engine(engine, i915)
		num_uabi_engines++;

	len = struct_size(query_ptr, engines, num_uabi_engines);

	ret = copy_query_item(&query, sizeof(query), len, query_item);
	if (ret != 0)
		return ret;

	if (query.num_engines || query.rsvd[0] || query.rsvd[1] ||
	    query.rsvd[2])
		return -EINVAL;

	info_ptr = &query_ptr->engines[0];

	for_each_uabi_engine(engine, i915) {
		info.engine.engine_class = engine->uabi_class;
		info.engine.engine_instance = engine->uabi_instance;
		info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
		info.capabilities = engine->uabi_capabilities;
		info.logical_instance = ilog2(engine->logical_mask);

		if (copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_engines++;
		info_ptr++;
	}

	if (copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return len;
}

static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
						    u64 user_regs_ptr,
						    u32 kernel_n_regs)
{
	/*
	 * We'll just put the number of registers, and won't copy the
	 * register.
	 */
	if (user_n_regs == 0)
		return 0;

	if (user_n_regs < kernel_n_regs)
		return -EINVAL;

	return 0;
}

static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
						u32 kernel_n_regs,
						u64 user_regs_ptr,
						u32 *user_n_regs)
{
	u32 __user *p = u64_to_user_ptr(user_regs_ptr);
	u32 r;

	if (*user_n_regs == 0) {
		*user_n_regs = kernel_n_regs;
		return 0;
	}

	*user_n_regs = kernel_n_regs;

	if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
		return -EFAULT;

	for (r = 0; r < kernel_n_regs; r++, p += 2) {
		unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
				p, Efault);
		unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
	}
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;
}
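
/*
 * Illustrative userspace view of the buffer that
 * copy_perf_config_registers_or_number() fills (a sketch, not built as
 * part of this file): each register is written as a pair of u32s, the
 * MMIO offset followed by the programmed value, so a buffer of
 * n_regs * 2 * sizeof(u32) bytes decodes as:
 *
 *	u32 *regs = malloc(n_regs * 2 * sizeof(u32));
 *	// ... point boolean_regs_ptr at regs and re-issue the query ...
 *	for (u32 i = 0; i < n_regs; i++) {
 *		u32 offset = regs[2 * i];
 *		u32 value  = regs[2 * i + 1];
 *	}
 */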
static int query_perf_config_data(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item,
				  bool use_uuid)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_perf_oa_config __user *user_config_ptr =
		u64_to_user_ptr(query_item->data_ptr +
				sizeof(struct drm_i915_query_perf_config));
	struct drm_i915_perf_oa_config user_config;
	struct i915_perf *perf = &i915->perf;
	struct i915_oa_config *oa_config;
	char uuid[UUID_STRING_LEN + 1];
	u64 config_id;
	u32 flags, total_size;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	total_size =
		sizeof(struct drm_i915_query_perf_config) +
		sizeof(struct drm_i915_perf_oa_config);

	if (query_item->length == 0)
		return total_size;

	if (query_item->length < total_size) {
		drm_dbg(&i915->drm,
			"Invalid query config data item size=%u expected=%u\n",
			query_item->length, total_size);
		return -EINVAL;
	}

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (use_uuid) {
		struct i915_oa_config *tmp;
		int id;

		BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));

		memset(&uuid, 0, sizeof(uuid));
		if (copy_from_user(uuid, user_query_config_ptr->uuid,
				   sizeof(user_query_config_ptr->uuid)))
			return -EFAULT;

		oa_config = NULL;
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (!strcmp(tmp->uuid, uuid)) {
				oa_config = i915_oa_config_get(tmp);
				break;
			}
		}
		rcu_read_unlock();
	} else {
		if (get_user(config_id, &user_query_config_ptr->config))
			return -EFAULT;

		oa_config = i915_perf_get_oa_config(perf, config_id);
	}
	if (!oa_config)
		return -ENOENT;

	if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
						       user_config.boolean_regs_ptr,
						       oa_config->b_counter_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
						       user_config.flex_regs_ptr,
						       oa_config->flex_regs_len);
	if (ret)
		goto out;

	ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
						       user_config.mux_regs_ptr,
						       oa_config->mux_regs_len);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
						   oa_config->b_counter_regs_len,
						   user_config.boolean_regs_ptr,
						   &user_config.n_boolean_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
						   oa_config->flex_regs_len,
						   user_config.flex_regs_ptr,
						   &user_config.n_flex_regs);
	if (ret)
		goto out;

	ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
						   oa_config->mux_regs_len,
						   user_config.mux_regs_ptr,
						   &user_config.n_mux_regs);
	if (ret)
		goto out;

	memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));

	if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
		ret = -EFAULT;
		goto out;
	}

	ret = total_size;

out:
	i915_oa_config_put(oa_config);
	return ret;
}

static size_t sizeof_perf_config_list(size_t count)
{
	return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
}

static size_t sizeof_perf_metrics(struct i915_perf *perf)
{
	struct i915_oa_config *tmp;
	size_t i;
	int id;

	i = 1;
	rcu_read_lock();
	idr_for_each_entry(&perf->metrics_idr, tmp, id)
		i++;
	rcu_read_unlock();

	return sizeof_perf_config_list(i);
}
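
/*
 * Illustrative layout of the buffer returned by the
 * DRM_I915_QUERY_PERF_CONFIG_LIST flavour below (a sketch, not built as
 * part of this file): a drm_i915_query_perf_config header whose 'config'
 * field holds the number of IDs, followed by that many u64 config IDs,
 * with ID 1 reserved for the test config.
 *
 *	struct drm_i915_query_perf_config *hdr = buf;
 *	u64 *ids = (u64 *)(hdr + 1);
 *	for (u64 i = 0; i < hdr->config; i++)
 *		printf("config id %llu\n", (unsigned long long)ids[i]);
 */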
static int query_perf_config_list(struct drm_i915_private *i915,
				  struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_perf_config __user *user_query_config_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct i915_perf *perf = &i915->perf;
	u64 *oa_config_ids = NULL;
	int alloc, n_configs;
	u32 flags;
	int ret;

	if (!perf->i915)
		return -ENODEV;

	if (query_item->length == 0)
		return sizeof_perf_metrics(perf);

	if (get_user(flags, &user_query_config_ptr->flags))
		return -EFAULT;

	if (flags != 0)
		return -EINVAL;

	/*
	 * Retry until the ID array is large enough: count the entries under
	 * RCU, and if configs were added concurrently (n_configs ends up
	 * greater than the allocation), grow the array and rescan.
	 */
	n_configs = 1;
	do {
		struct i915_oa_config *tmp;
		u64 *ids;
		int id;

		ids = krealloc(oa_config_ids,
			       n_configs * sizeof(*oa_config_ids),
			       GFP_KERNEL);
		if (!ids)
			return -ENOMEM;

		alloc = fetch_and_zero(&n_configs);

		ids[n_configs++] = 1ull; /* reserved for test_config */
		rcu_read_lock();
		idr_for_each_entry(&perf->metrics_idr, tmp, id) {
			if (n_configs < alloc)
				ids[n_configs] = id;
			n_configs++;
		}
		rcu_read_unlock();

		oa_config_ids = ids;
	} while (n_configs > alloc);

	if (query_item->length < sizeof_perf_config_list(n_configs)) {
		drm_dbg(&i915->drm,
			"Invalid query config list item size=%u expected=%zu\n",
			query_item->length,
			sizeof_perf_config_list(n_configs));
		kfree(oa_config_ids);
		return -EINVAL;
	}

	if (put_user(n_configs, &user_query_config_ptr->config)) {
		kfree(oa_config_ids);
		return -EFAULT;
	}

	ret = copy_to_user(user_query_config_ptr + 1,
			   oa_config_ids,
			   n_configs * sizeof(*oa_config_ids));
	kfree(oa_config_ids);
	if (ret)
		return -EFAULT;

	return sizeof_perf_config_list(n_configs);
}

static int query_perf_config(struct drm_i915_private *i915,
			     struct drm_i915_query_item *query_item)
{
	switch (query_item->flags) {
	case DRM_I915_QUERY_PERF_CONFIG_LIST:
		return query_perf_config_list(i915, query_item);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
		return query_perf_config_data(i915, query_item, true);
	case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
		return query_perf_config_data(i915, query_item, false);
	default:
		return -EINVAL;
	}
}

static int query_memregion_info(struct drm_i915_private *i915,
				struct drm_i915_query_item *query_item)
{
	struct drm_i915_query_memory_regions __user *query_ptr =
		u64_to_user_ptr(query_item->data_ptr);
	struct drm_i915_memory_region_info __user *info_ptr =
		&query_ptr->regions[0];
	struct drm_i915_memory_region_info info = { };
	struct drm_i915_query_memory_regions query;
	struct intel_memory_region *mr;
	u32 total_length;
	int ret, id, i;

	if (query_item->flags != 0)
		return -EINVAL;

	total_length = sizeof(query);
	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		total_length += sizeof(info);
	}

	ret = copy_query_item(&query, sizeof(query), total_length, query_item);
	if (ret != 0)
		return ret;

	if (query.num_regions)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(query.rsvd); i++) {
		if (query.rsvd[i])
			return -EINVAL;
	}

	for_each_memory_region(mr, i915, id) {
		if (mr->private)
			continue;

		info.region.memory_class = mr->type;
		info.region.memory_instance = mr->instance;
		info.probed_size = mr->total;

		if (mr->type == INTEL_MEMORY_LOCAL)
			info.probed_cpu_visible_size = resource_size(&mr->io);
		else
			info.probed_cpu_visible_size = mr->total;

		if (perfmon_capable()) {
			intel_memory_region_avail(mr,
						  &info.unallocated_size,
						  &info.unallocated_cpu_visible_size);
		} else {
			info.unallocated_size = info.probed_size;
			info.unallocated_cpu_visible_size =
				info.probed_cpu_visible_size;
		}

		if (__copy_to_user(info_ptr, &info, sizeof(info)))
			return -EFAULT;

		query.num_regions++;
		info_ptr++;
	}

	if (__copy_to_user(query_ptr, &query, sizeof(query)))
		return -EFAULT;

	return total_length;
}
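
/*
 * Userspace usage sketch for the memory region query above (illustrative
 * only, not built as part of this file; assumes libdrm's drmIoctl() and an
 * open DRM fd). Same two-phase pattern: probe the size, then read the
 * region array that follows the header.
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q);
 *	struct drm_i915_query_memory_regions *info = calloc(1, item.length);
 *	item.data_ptr = (uintptr_t)info;
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q);
 *
 *	for (unsigned int i = 0; i < info->num_regions; i++)
 *		printf("class %u instance %u: %llu bytes\n",
 *		       info->regions[i].region.memory_class,
 *		       info->regions[i].region.memory_instance,
 *		       (unsigned long long)info->regions[i].probed_size);
 */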
static int query_hwconfig_blob(struct drm_i915_private *i915,
			       struct drm_i915_query_item *query_item)
{
	struct intel_gt *gt = to_gt(i915);
	struct intel_hwconfig *hwconfig = &gt->info.hwconfig;

	if (!hwconfig->size || !hwconfig->ptr)
		return -ENODEV;

	if (query_item->length == 0)
		return hwconfig->size;

	if (query_item->length < hwconfig->size)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
			 hwconfig->ptr, hwconfig->size))
		return -EFAULT;

	return hwconfig->size;
}

static int
query_guc_submission_version(struct drm_i915_private *i915,
			     struct drm_i915_query_item *query)
{
	struct drm_i915_query_guc_submission_version __user *query_ptr =
		u64_to_user_ptr(query->data_ptr);
	struct drm_i915_query_guc_submission_version ver;
	struct intel_guc *guc = &to_gt(i915)->uc.guc;
	const size_t size = sizeof(ver);
	int ret;

	if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc))
		return -ENODEV;

	ret = copy_query_item(&ver, size, size, query);
	if (ret != 0)
		return ret;

	if (ver.branch || ver.major || ver.minor || ver.patch)
		return -EINVAL;

	ver.branch = 0;
	ver.major = guc->submission_version.major;
	ver.minor = guc->submission_version.minor;
	ver.patch = guc->submission_version.patch;

	if (copy_to_user(query_ptr, &ver, size))
		return -EFAULT;

	return 0;
}

/* Query IDs are 1-based: query_id N dispatches to i915_query_funcs[N - 1]. */
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
					struct drm_i915_query_item *query_item) = {
	query_topology_info,
	query_engine_info,
	query_perf_config,
	query_memregion_info,
	query_hwconfig_blob,
	query_geometry_subslices,
	query_guc_submission_version,
};

int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_query *args = data;
	struct drm_i915_query_item __user *user_item_ptr =
		u64_to_user_ptr(args->items_ptr);
	u32 i;

	if (args->flags != 0)
		return -EINVAL;

	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
		struct drm_i915_query_item item;
		unsigned long func_idx;
		int ret;

		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
			return -EFAULT;

		if (item.query_id == 0)
			return -EINVAL;

		if (overflows_type(item.query_id - 1, unsigned long))
			return -EINVAL;

		func_idx = item.query_id - 1;

		ret = -EINVAL;
		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
			func_idx = array_index_nospec(func_idx,
						      ARRAY_SIZE(i915_query_funcs));
			ret = i915_query_funcs[func_idx](dev_priv, &item);
		}

		/* Only write the length back to userspace if they differ. */
		if (ret != item.length && put_user(ret, &user_item_ptr->length))
			return -EFAULT;
	}

	return 0;
}
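
/*
 * Note on error reporting (a userspace-facing sketch, not built as part of
 * this file): the ioctl returns 0 even when individual items fail; per-item
 * status comes back through item.length, which holds either the number of
 * bytes needed/written or a negative error code.
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q) == 0) {
 *		if (item.length < 0)
 *			fprintf(stderr, "query failed: %d\n", item.length);
 *	}
 */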