// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt)	"habanalabs: " fmt

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

/* make sure there is space for all the signed info */
static_assert(sizeof(struct cpucp_info) <= SEC_DEV_INFO_BUF_SZ);

static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
	[HL_DEBUG_OP_FUNNEL] = 0,
	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
	[HL_DEBUG_OP_TIMESTAMP] = 0
};
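
/*
 * Most INFO handlers below share the same calling convention: userspace
 * supplies an output buffer via args->return_pointer/args->return_size and
 * the handler copies back min(return_size, sizeof(<info struct>)) bytes.
 * Copying the minimum of the two sizes keeps newer kernels compatible with
 * older userspace that was built against a smaller info struct.
 */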
static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_status dev_stat = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!size) || (!out))
		return -EINVAL;

	dev_stat.status = hl_device_status(hdev);

	return copy_to_user(out, &dev_stat,
			min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
}

static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size, dram_available_size;

	if ((!size) || (!out))
		return -EINVAL;

	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	hw_ip.dram_base_address =
			prop->dram_supports_virtual_memory ?
			prop->dmmu.start_addr : prop->dram_user_base_address;
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF;
	hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask;

	hw_ip.sram_size = prop->sram_size - sram_kmd_size;

	dram_available_size = prop->dram_size - dram_kmd_size;

	hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size, prop->dram_page_size) *
				prop->dram_page_size;

	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;

	hw_ip.dram_page_size = prop->dram_page_size;
	hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
	hw_ip.num_of_events = prop->num_of_events;

	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	hw_ip.decoder_enabled_mask = prop->decoder_enabled_mask;
	hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
	hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
	hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
	hw_ip.tpc_interrupt_id = prop->tpc_interrupt_id;

	hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
	hw_ip.server_type = prop->server_type;
	hw_ip.security_enabled = prop->fw_security_enabled;
	hw_ip.revision_id = hdev->pdev->revision;
	hw_ip.rotator_enabled_mask = prop->rotator_enabled_mask;
	hw_ip.engine_core_interrupt_reg_addr = prop->engine_core_interrupt_reg_addr;
	hw_ip.reserved_dram_size = dram_kmd_size;

	return copy_to_user(out, &hw_ip,
			min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}

static int hw_events_info(struct hl_device *hdev, bool aggregate,
			struct hl_info_args *args)
{
	u32 size, max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	void *arr;

	if ((!max_size) || (!out))
		return -EINVAL;

	arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
	if (!arr) {
		dev_err(hdev->dev, "Events info not supported\n");
		return -EOPNOTSUPP;
	}

	return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}
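
/*
 * Reads are destructive: the pending events mask is fetched and cleared
 * atomically under the notifier lock, so each event bit is reported to
 * userspace at most once per read.
 */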
static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	u32 max_size = args->return_size;
	u64 events_mask;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((max_size < sizeof(u64)) || (!out))
		return -EINVAL;

	mutex_lock(&hpriv->notifier_event.lock);
	events_mask = hpriv->notifier_event.events_mask;
	hpriv->notifier_event.events_mask = 0;
	mutex_unlock(&hpriv->notifier_event.lock);

	return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
}

static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_dram_usage dram_usage = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_kmd_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);
	dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
					atomic64_read(&hdev->dram_used_mem);
	if (hpriv->ctx)
		dram_usage.ctx_dram_mem =
			atomic64_read(&hpriv->ctx->dram_phys_mem);

	return copy_to_user(out, &dram_usage,
			min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
}

static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_idle hw_idle = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
					hw_idle.busy_engines_mask_ext,
					HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
	hw_idle.busy_engines_mask =
			lower_32_bits(hw_idle.busy_engines_mask_ext[0]);

	return copy_to_user(out, &hw_idle,
			min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}
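
/*
 * The input/output buffers are bounce buffers: user data is staged in kernel
 * memory before being handed to the ASIC-specific coresight handler. Note
 * that hl_debug_ioctl() clamps args->input_size to hl_debug_struct_size[op]
 * before calling here, so the copy below never exceeds the allocation.
 */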
static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL, *output = NULL;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->reg_idx = args->reg_idx;
	params->enable = args->enable;
	params->op = args->op;

	if (args->input_ptr && args->input_size) {
		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			rc = -EFAULT;
			dev_err(hdev->dev, "failed to copy input debug data\n");
			goto out;
		}

		params->input = input;
	}

	if (args->output_ptr && args->output_size) {
		output = kzalloc(args->output_size, GFP_KERNEL);
		if (!output) {
			rc = -ENOMEM;
			goto out;
		}

		params->output = output;
		params->output_size = args->output_size;
	}

	rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
	if (rc) {
		dev_err(hdev->dev,
			"debug coresight operation failed %d\n", rc);
		goto out;
	}

	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
					output, args->output_size)) {
		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
		rc = -EFAULT;
		goto out;
	}

out:
	kfree(params);
	kfree(output);
	kfree(input);

	return rc;
}

static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_utilization device_util = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_device_utilization(hdev, &device_util.utilization);
	if (rc)
		return -EINVAL;

	return copy_to_user(out, &device_util,
			min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}

static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_clk_rate clk_rate = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, &clk_rate.max_clk_rate_mhz);
	if (rc)
		return rc;

	return copy_to_user(out, &clk_rate, min_t(size_t, max_size, sizeof(clk_rate)))
			? -EFAULT : 0;
}

static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_reset_count reset_count = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
	reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt;

	return copy_to_user(out, &reset_count,
			min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
}

static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_time_sync time_sync = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
	time_sync.host_time = ktime_get_raw_ns();
	time_sync.tsc_time = rdtsc();

	return copy_to_user(out, &time_sync,
			min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}

static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_pci_counters pci_counters = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
	if (rc)
		return rc;

	return copy_to_user(out, &pci_counters,
			min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
}
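
/*
 * For throttling events that are still in progress (no recorded end time),
 * the duration reported to userspace is measured up to the current time.
 */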
static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_clk_throttle clk_throttle = {0};
	ktime_t end_time, zero_time = ktime_set(0, 0);
	u32 max_size = args->return_size;
	int i;

	if ((!max_size) || (!out))
		return -EINVAL;

	mutex_lock(&hdev->clk_throttling.lock);

	clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;

	for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
		if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
			continue;

		clk_throttle.clk_throttling_timestamp_us[i] =
			ktime_to_us(hdev->clk_throttling.timestamp[i].start);

		if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
			end_time = hdev->clk_throttling.timestamp[i].end;
		else
			end_time = ktime_get();

		clk_throttle.clk_throttling_duration_ns[i] =
			ktime_to_ns(ktime_sub(end_time,
				hdev->clk_throttling.timestamp[i].start));
	}
	mutex_unlock(&hdev->clk_throttling.lock);

	return copy_to_user(out, &clk_throttle,
			min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
}

static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_cs_counters cs_counters = {0};
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_counters_atomic *cntr;
	u32 max_size = args->return_size;

	cntr = &hdev->aggregated_cs_counters;

	if ((!max_size) || (!out))
		return -EINVAL;

	cs_counters.total_out_of_mem_drop_cnt =
			atomic64_read(&cntr->out_of_mem_drop_cnt);
	cs_counters.total_parsing_drop_cnt =
			atomic64_read(&cntr->parsing_drop_cnt);
	cs_counters.total_queue_full_drop_cnt =
			atomic64_read(&cntr->queue_full_drop_cnt);
	cs_counters.total_device_in_reset_drop_cnt =
			atomic64_read(&cntr->device_in_reset_drop_cnt);
	cs_counters.total_max_cs_in_flight_drop_cnt =
			atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
	cs_counters.total_validation_drop_cnt =
			atomic64_read(&cntr->validation_drop_cnt);

	if (hpriv->ctx) {
		cs_counters.ctx_out_of_mem_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
		cs_counters.ctx_parsing_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.parsing_drop_cnt);
		cs_counters.ctx_queue_full_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.queue_full_drop_cnt);
		cs_counters.ctx_device_in_reset_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
		cs_counters.ctx_max_cs_in_flight_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
		cs_counters.ctx_validation_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.validation_drop_cnt);
	}

	return copy_to_user(out, &cs_counters,
			min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}

static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_info_sync_manager sm_info = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	if (args->dcore_id >= HL_MAX_DCORES)
		return -EINVAL;

	sm_info.first_available_sync_object =
			prop->first_available_user_sob[args->dcore_id];
	sm_info.first_available_monitor =
			prop->first_available_user_mon[args->dcore_id];
	sm_info.first_available_cq =
			prop->first_available_cq[args->dcore_id];

	return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
			sizeof(sm_info))) ? -EFAULT : 0;
}

static int total_energy_consumption_info(struct hl_fpriv *hpriv,
			struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_energy total_energy = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_total_energy_get(hdev,
			&total_energy.total_energy_consumption);
	if (rc)
		return rc;

	return copy_to_user(out, &total_energy,
			min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
}

static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_pll_frequency_info freq_info = { {0} };
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
	if (rc)
		return rc;

	return copy_to_user(out, &freq_info,
			min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
}

static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_power_info power_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
	if (rc)
		return rc;

	return copy_to_user(out, &power_info,
			min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
}

static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_open_stats_info open_stats_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	open_stats_info.last_open_period_ms = jiffies64_to_msecs(
		hdev->last_open_session_duration_jif);
	open_stats_info.open_counter = hdev->open_counter;
	open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
	open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;

	return copy_to_user(out, &open_stats_info,
			min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
}

static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	u32 pend_rows_num = 0;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
	if (rc)
		return rc;

	return copy_to_user(out, &pend_rows_num,
			min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0;
}

static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct cpucp_hbm_row_info info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_replaced_row_get(hdev, &info);
	if (rc)
		return rc;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_last_err_open_dev_time info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_cs_timeout_event info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.seq = hdev->captured_err_info.cs_timeout.seq;
	info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct razwi_info *razwi_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	razwi_info = &hdev->captured_err_info.razwi_info;
	if (!razwi_info->razwi_info_available)
		return 0;

	return copy_to_user(out, &razwi_info->razwi,
			min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ? -EFAULT : 0;
}

static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_info_undefined_opcode_event info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
	info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
	info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
	info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
	info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
	info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
	memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
			sizeof(info.cb_addr_streams));

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_dev_memalloc_page_sizes info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	/*
	 * Future ASICs that support multiple DRAM page sizes will support only
	 * "powers of 2" pages (unlike some earlier ASICs that supported multiple
	 * page sizes). For that reason, on ASICs that do not support multiple
	 * page sizes, this function returns an empty bitmask, indicating that
	 * multiple page sizes are not supported.
	 */
	info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
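
/*
 * Attestation info is fetched from firmware using a caller-supplied nonce
 * and repacked into the uapi layout, converting little-endian firmware
 * fields to host byte order on the way.
 */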
668 */ 669 info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask; 670 671 return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; 672 } 673 674 static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 675 { 676 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 677 struct cpucp_sec_attest_info *sec_attest_info; 678 struct hl_info_sec_attest *info; 679 u32 max_size = args->return_size; 680 int rc; 681 682 if ((!max_size) || (!out)) 683 return -EINVAL; 684 685 sec_attest_info = kmalloc(sizeof(*sec_attest_info), GFP_KERNEL); 686 if (!sec_attest_info) 687 return -ENOMEM; 688 689 info = kzalloc(sizeof(*info), GFP_KERNEL); 690 if (!info) { 691 rc = -ENOMEM; 692 goto free_sec_attest_info; 693 } 694 695 rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce); 696 if (rc) 697 goto free_info; 698 699 info->nonce = le32_to_cpu(sec_attest_info->nonce); 700 info->pcr_quote_len = le16_to_cpu(sec_attest_info->pcr_quote_len); 701 info->pub_data_len = le16_to_cpu(sec_attest_info->pub_data_len); 702 info->certificate_len = le16_to_cpu(sec_attest_info->certificate_len); 703 info->pcr_num_reg = sec_attest_info->pcr_num_reg; 704 info->pcr_reg_len = sec_attest_info->pcr_reg_len; 705 info->quote_sig_len = sec_attest_info->quote_sig_len; 706 memcpy(&info->pcr_data, &sec_attest_info->pcr_data, sizeof(info->pcr_data)); 707 memcpy(&info->pcr_quote, &sec_attest_info->pcr_quote, sizeof(info->pcr_quote)); 708 memcpy(&info->public_data, &sec_attest_info->public_data, sizeof(info->public_data)); 709 memcpy(&info->certificate, &sec_attest_info->certificate, sizeof(info->certificate)); 710 memcpy(&info->quote_sig, &sec_attest_info->quote_sig, sizeof(info->quote_sig)); 711 712 rc = copy_to_user(out, info, 713 min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0; 714 715 free_info: 716 kfree(info); 717 free_sec_attest_info: 718 kfree(sec_attest_info); 719 720 return rc; 721 } 722 723 static int dev_info_signed(struct hl_fpriv *hpriv, struct hl_info_args *args) 724 { 725 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 726 struct cpucp_dev_info_signed *dev_info_signed; 727 struct hl_info_signed *info; 728 u32 max_size = args->return_size; 729 int rc; 730 731 if ((!max_size) || (!out)) 732 return -EINVAL; 733 734 dev_info_signed = kzalloc(sizeof(*dev_info_signed), GFP_KERNEL); 735 if (!dev_info_signed) 736 return -ENOMEM; 737 738 info = kzalloc(sizeof(*info), GFP_KERNEL); 739 if (!info) { 740 rc = -ENOMEM; 741 goto free_dev_info_signed; 742 } 743 744 rc = hl_fw_get_dev_info_signed(hpriv->hdev, 745 dev_info_signed, args->sec_attest_nonce); 746 if (rc) 747 goto free_info; 748 749 info->nonce = le32_to_cpu(dev_info_signed->nonce); 750 info->info_sig_len = dev_info_signed->info_sig_len; 751 info->pub_data_len = le16_to_cpu(dev_info_signed->pub_data_len); 752 info->certificate_len = le16_to_cpu(dev_info_signed->certificate_len); 753 info->dev_info_len = sizeof(struct cpucp_info); 754 memcpy(&info->info_sig, &dev_info_signed->info_sig, sizeof(info->info_sig)); 755 memcpy(&info->public_data, &dev_info_signed->public_data, sizeof(info->public_data)); 756 memcpy(&info->certificate, &dev_info_signed->certificate, sizeof(info->certificate)); 757 memcpy(&info->dev_info, &dev_info_signed->info, info->dev_info_len); 758 759 rc = copy_to_user(out, info, min_t(size_t, max_size, sizeof(*info))) ? 
static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	int rc;

	/* check if an eventfd is already registered for this file descriptor */
	mutex_lock(&hpriv->notifier_event.lock);
	if (hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
	if (IS_ERR(hpriv->notifier_event.eventfd)) {
		rc = PTR_ERR(hpriv->notifier_event.eventfd);
		hpriv->notifier_event.eventfd = NULL;
		mutex_unlock(&hpriv->notifier_event.lock);
		return rc;
	}

	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	mutex_lock(&hpriv->notifier_event.lock);
	if (!hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	eventfd_ctx_put(hpriv->notifier_event.eventfd);
	hpriv->notifier_event.eventfd = NULL;
	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 status_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct engines_data eng_data;
	int rc;

	if ((status_buf_size < SZ_1K) || (status_buf_size > HL_ENGINES_DATA_MAX_SIZE) || (!out))
		return -EINVAL;

	eng_data.actual_size = 0;
	eng_data.allocated_buf_size = status_buf_size;
	eng_data.buf = vmalloc(status_buf_size);
	if (!eng_data.buf)
		return -ENOMEM;

	hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);

	if (eng_data.actual_size > eng_data.allocated_buf_size) {
		dev_err(hdev->dev,
			"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
			eng_data.actual_size, status_buf_size);
		vfree(eng_data.buf);
		return -ENOMEM;
	}

	args->user_buffer_actual_size = eng_data.actual_size;
	rc = copy_to_user(out, eng_data.buf, min_t(size_t, status_buf_size, eng_data.actual_size)) ?
				-EFAULT : 0;

	vfree(eng_data.buf);

	return rc;
}

static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct page_fault_info *pgf_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

	return copy_to_user(out, &pgf_info->page_fault,
			min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0;
}
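
/*
 * Typical usage is a two-step query: call once with a small buffer to learn
 * the number of mappings via args->array_size (reported back even when
 * -ENOMEM is returned), then call again with a buffer of at least
 * array_size * sizeof(struct hl_user_mapping) bytes.
 */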
static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 user_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct page_fault_info *pgf_info;
	u64 actual_size;

	if (!out)
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

	args->array_size = pgf_info->num_of_user_mappings;

	actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
	if (user_buf_size < actual_size)
		return -ENOMEM;

	return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
}

static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct hw_err_info *info;
	int rc;

	if (!user_buf)
		return -EINVAL;

	info = &hdev->captured_err_info.hw_err;
	if (!info->event_info_available)
		return 0;

	if (user_buf_size < sizeof(struct hl_info_hw_err_event))
		return -ENOMEM;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event));
	return rc ? -EFAULT : 0;
}

static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct fw_err_info *info;
	int rc;

	if (!user_buf)
		return -EINVAL;

	info = &hdev->captured_err_info.fw_err;
	if (!info->event_info_available)
		return 0;

	if (user_buf_size < sizeof(struct hl_info_fw_err_event))
		return -ENOMEM;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event));
	return rc ? -EFAULT : 0;
}

static int engine_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct engine_err_info *info;
	int rc;

	if (!user_buf)
		return -EINVAL;

	info = &hdev->captured_err_info.engine_err;
	if (!info->event_info_available)
		return 0;

	if (user_buf_size < sizeof(struct hl_info_engine_err_event))
		return -ENOMEM;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_engine_err_event));
	return rc ? -EFAULT : 0;
}
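
/*
 * Generic firmware passthrough: the user buffer is staged in a
 * CPU-accessible DMA buffer, handed to firmware together with the
 * sub-opcode, and the (possibly updated) contents are copied back,
 * truncated to the original buffer size.
 */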
static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
{
	void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
	u32 size = info_args->return_size;
	dma_addr_t dma_handle;
	bool need_input_buff;
	void *fw_buff;
	int rc = 0;

	switch (info_args->fw_sub_opcode) {
	case HL_PASSTHROUGH_VERSIONS:
		need_input_buff = false;
		break;
	case HL_GET_ERR_COUNTERS_CMD:
		need_input_buff = true;
		break;
	case HL_GET_P_STATE:
		need_input_buff = false;
		break;
	default:
		return -EINVAL;
	}

	if (size > SZ_1M) {
		dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
		return -EINVAL;
	}

	fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
	if (!fw_buff)
		return -ENOMEM;

	if (need_input_buff && copy_from_user(fw_buff, buff, size)) {
		dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
		rc = -EFAULT;
		goto free_buff;
	}

	rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size);
	if (rc)
		goto free_buff;

	if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) {
		dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
		rc = -EFAULT;
	}

free_buff:
	hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);

	return rc;
}
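
/*
 * INFO ioctl dispatch is split in two: opcodes in the first switch are
 * served even when the device is disabled or under reset, while the second
 * switch is reached only when the device is operational.
 */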
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	enum hl_device_status status;
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	int rc;

	if (args->pad) {
		dev_dbg(hdev->dev, "Padding bytes must be 0\n");
		return -EINVAL;
	}

	/*
	 * Information is returned for the following opcodes even if the device
	 * is disabled or in reset.
	 */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);

	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);

	case HL_INFO_RESET_COUNT:
		return get_reset_count(hdev, args);

	case HL_INFO_HW_EVENTS:
		return hw_events_info(hdev, false, args);

	case HL_INFO_HW_EVENTS_AGGREGATE:
		return hw_events_info(hdev, true, args);

	case HL_INFO_CS_COUNTERS:
		return cs_counters_info(hpriv, args);

	case HL_INFO_CLK_THROTTLE_REASON:
		return clk_throttle_info(hpriv, args);

	case HL_INFO_SYNC_MANAGER:
		return sync_manager_info(hpriv, args);

	case HL_INFO_OPEN_STATS:
		return open_stats_info(hpriv, args);

	case HL_INFO_LAST_ERR_OPEN_DEV_TIME:
		return last_err_open_dev_info(hpriv, args);

	case HL_INFO_CS_TIMEOUT_EVENT:
		return cs_timeout_info(hpriv, args);

	case HL_INFO_RAZWI_EVENT:
		return razwi_info(hpriv, args);

	case HL_INFO_UNDEFINED_OPCODE_EVENT:
		return undefined_opcode_info(hpriv, args);

	case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
		return dev_mem_alloc_page_sizes_info(hpriv, args);

	case HL_INFO_GET_EVENTS:
		return events_info(hpriv, args);

	case HL_INFO_PAGE_FAULT_EVENT:
		return page_fault_info(hpriv, args);

	case HL_INFO_USER_MAPPINGS:
		return user_mappings_info(hpriv, args);

	case HL_INFO_UNREGISTER_EVENTFD:
		return eventfd_unregister(hpriv, args);

	case HL_INFO_HW_ERR_EVENT:
		return hw_err_info(hpriv, args);

	case HL_INFO_FW_ERR_EVENT:
		return fw_err_info(hpriv, args);

	case HL_INFO_USER_ENGINE_ERR_EVENT:
		return engine_err_info(hpriv, args);

	case HL_INFO_DRAM_USAGE:
		return dram_usage_info(hpriv, args);
	default:
		break;
	}

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;

	case HL_INFO_DEVICE_UTILIZATION:
		rc = device_utilization(hdev, args);
		break;

	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;

	case HL_INFO_TIME_SYNC:
		return time_sync_info(hdev, args);

	case HL_INFO_PCI_COUNTERS:
		return pci_counters_info(hpriv, args);

	case HL_INFO_TOTAL_ENERGY:
		return total_energy_consumption_info(hpriv, args);

	case HL_INFO_PLL_FREQUENCY:
		return pll_frequency_info(hpriv, args);

	case HL_INFO_POWER:
		return power_info(hpriv, args);

	case HL_INFO_DRAM_REPLACED_ROWS:
		return dram_replaced_rows_info(hpriv, args);

	case HL_INFO_DRAM_PENDING_ROWS:
		return dram_pending_rows_info(hpriv, args);

	case HL_INFO_SECURED_ATTESTATION:
		return sec_attest_info(hpriv, args);

	case HL_INFO_REGISTER_EVENTFD:
		return eventfd_register(hpriv, args);

	case HL_INFO_ENGINE_STATUS:
		return engine_status_info(hpriv, args);

	case HL_INFO_FW_GENERIC_REQ:
		return send_fw_generic_request(hdev, args);

	case HL_INFO_DEV_SIGNED:
		return dev_info_signed(hpriv, args);

	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

int hl_info_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
	struct hl_fpriv *hpriv = file_priv->driver_priv;

	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}
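
/*
 * A minimal userspace sketch of the INFO ioctl, for illustration only
 * (error handling omitted; names are taken from the uapi header included
 * above):
 *
 *	struct hl_info_device_status dev_stat = {0};
 *	struct hl_info_args args = {
 *		.return_pointer = (__u64) (uintptr_t) &dev_stat,
 *		.return_size = sizeof(dev_stat),
 *		.op = HL_INFO_DEVICE_STATUS,
 *	};
 *
 *	if (!ioctl(fd, DRM_IOCTL_HL_INFO, &args))
 *		printf("device status: %u\n", dev_stat.status);
 */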
static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
	struct hl_info_args *args = data;

	switch (args->op) {
	case HL_INFO_GET_EVENTS:
	case HL_INFO_UNREGISTER_EVENTFD:
	case HL_INFO_REGISTER_EVENTFD:
		return -EOPNOTSUPP;
	default:
		break;
	}

	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}

int hl_debug_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv)
{
	struct hl_fpriv *hpriv = file_priv->driver_priv;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_debug_args *args = data;
	enum hl_device_status status;
	int rc = 0;

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(hdev->dev,
			"Device is %s. Can't execute DEBUG IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_DEBUG_OP_ETR:
	case HL_DEBUG_OP_ETF:
	case HL_DEBUG_OP_STM:
	case HL_DEBUG_OP_FUNNEL:
	case HL_DEBUG_OP_BMON:
	case HL_DEBUG_OP_SPMU:
	case HL_DEBUG_OP_TIMESTAMP:
		if (!hdev->in_debug) {
			dev_err_ratelimited(hdev->dev,
				"Rejecting debug configuration request because device not in debug mode\n");
			return -EFAULT;
		}
		args->input_size = min(args->input_size, hl_debug_struct_size[args->op]);
		rc = debug_coresight(hdev, hpriv->ctx, args);
		break;

	case HL_DEBUG_OP_SET_MODE:
		rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
		break;

	default:
		dev_err(hdev->dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl) - HL_COMMAND_START] = {.cmd = ioctl, .func = _func}

static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(DRM_IOCTL_HL_INFO, hl_info_ioctl_control)
};
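
/*
 * Common ioctl marshalling: a kernel bounce buffer is sized to the larger of
 * the kernel's and userspace's idea of the argument struct (zeroed, so a
 * smaller userspace struct leaves trailing fields cleared), copied in for
 * IOC_IN commands, and copied back for IOC_OUT commands.
 */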
static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long arg,
			const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t *func;
	u32 hl_size;
	int retcode;

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(dev, "no function\n");
		retcode = -ENOTTY;
		goto out_err;
	}

	hl_size = _IOC_SIZE(ioctl->cmd);
	usize = asize = _IOC_SIZE(cmd);
	if (hl_size > asize)
		asize = hl_size;

	cmd = ioctl->cmd;

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kzalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto out_err;
			}
		}
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			retcode = -EFAULT;
			goto out_err;
		}
	}

	retcode = func(hpriv, kdata);

	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		retcode = -EFAULT;

out_err:
	if (retcode)
		dev_dbg_ratelimited(dev,
			"error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
			task_pid_nr(current), current->comm, cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	return retcode;
}

long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if (!hdev) {
		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
		return -ENODEV;
	}

	if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) {
		ioctl = &hl_ioctls_control[nr - HL_COMMAND_START];
	} else {
		dev_dbg_ratelimited(hdev->dev_ctrl,
			"invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
			task_pid_nr(current), current->comm, cmd, nr);
		return -ENOTTY;
	}

	return _hl_ioctl(hpriv, cmd, arg, ioctl, hdev->dev_ctrl);
}