1 // SPDX-License-Identifier: GPL-2.0 2 3 /* 4 * Copyright 2016-2022 HabanaLabs, Ltd. 5 * All Rights Reserved. 6 */ 7 8 #define pr_fmt(fmt) "habanalabs: " fmt 9 10 #include <uapi/drm/habanalabs_accel.h> 11 #include "habanalabs.h" 12 13 #include <linux/fs.h> 14 #include <linux/kernel.h> 15 #include <linux/pci.h> 16 #include <linux/slab.h> 17 #include <linux/uaccess.h> 18 #include <linux/vmalloc.h> 19 20 #include <asm/msr.h> 21 22 static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = { 23 [HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr), 24 [HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf), 25 [HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm), 26 [HL_DEBUG_OP_FUNNEL] = 0, 27 [HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon), 28 [HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu), 29 [HL_DEBUG_OP_TIMESTAMP] = 0 30 31 }; 32 33 static int device_status_info(struct hl_device *hdev, struct hl_info_args *args) 34 { 35 struct hl_info_device_status dev_stat = {0}; 36 u32 size = args->return_size; 37 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 38 39 if ((!size) || (!out)) 40 return -EINVAL; 41 42 dev_stat.status = hl_device_status(hdev); 43 44 return copy_to_user(out, &dev_stat, 45 min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0; 46 } 47 48 static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args) 49 { 50 struct hl_info_hw_ip_info hw_ip = {0}; 51 u32 size = args->return_size; 52 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 53 struct asic_fixed_properties *prop = &hdev->asic_prop; 54 u64 sram_kmd_size, dram_kmd_size, dram_available_size; 55 56 if ((!size) || (!out)) 57 return -EINVAL; 58 59 sram_kmd_size = (prop->sram_user_base_address - 60 prop->sram_base_address); 61 dram_kmd_size = (prop->dram_user_base_address - 62 prop->dram_base_address); 63 64 hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev); 65 hw_ip.sram_base_address = prop->sram_user_base_address; 66 hw_ip.dram_base_address = 67 prop->dram_supports_virtual_memory ? 68 prop->dmmu.start_addr : prop->dram_user_base_address; 69 hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF; 70 hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask; 71 72 hw_ip.sram_size = prop->sram_size - sram_kmd_size; 73 74 dram_available_size = prop->dram_size - dram_kmd_size; 75 76 hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size, prop->dram_page_size) * 77 prop->dram_page_size; 78 79 if (hw_ip.dram_size > PAGE_SIZE) 80 hw_ip.dram_enabled = 1; 81 82 hw_ip.dram_page_size = prop->dram_page_size; 83 hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size; 84 hw_ip.num_of_events = prop->num_of_events; 85 86 memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version, 87 min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN)); 88 89 memcpy(hw_ip.card_name, prop->cpucp_info.card_name, 90 min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN)); 91 92 hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version); 93 hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location); 94 95 hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr; 96 hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf; 97 hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od; 98 hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor; 99 100 hw_ip.decoder_enabled_mask = prop->decoder_enabled_mask; 101 hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode; 102 hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt; 103 hw_ip.number_of_user_interrupts = prop->user_interrupt_count; 104 hw_ip.tpc_interrupt_id = prop->tpc_interrupt_id; 105 106 hw_ip.edma_enabled_mask = prop->edma_enabled_mask; 107 hw_ip.server_type = prop->server_type; 108 hw_ip.security_enabled = prop->fw_security_enabled; 109 hw_ip.revision_id = hdev->pdev->revision; 110 hw_ip.rotator_enabled_mask = prop->rotator_enabled_mask; 111 hw_ip.engine_core_interrupt_reg_addr = prop->engine_core_interrupt_reg_addr; 112 hw_ip.reserved_dram_size = dram_kmd_size; 113 114 return copy_to_user(out, &hw_ip, 115 min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0; 116 } 117 118 static int hw_events_info(struct hl_device *hdev, bool aggregate, 119 struct hl_info_args *args) 120 { 121 u32 size, max_size = args->return_size; 122 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 123 void *arr; 124 125 if ((!max_size) || (!out)) 126 return -EINVAL; 127 128 arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size); 129 if (!arr) { 130 dev_err(hdev->dev, "Events info not supported\n"); 131 return -EOPNOTSUPP; 132 } 133 134 return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0; 135 } 136 137 static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 138 { 139 u32 max_size = args->return_size; 140 u64 events_mask; 141 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 142 143 if ((max_size < sizeof(u64)) || (!out)) 144 return -EINVAL; 145 146 mutex_lock(&hpriv->notifier_event.lock); 147 events_mask = hpriv->notifier_event.events_mask; 148 hpriv->notifier_event.events_mask = 0; 149 mutex_unlock(&hpriv->notifier_event.lock); 150 151 return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0; 152 } 153 154 static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 155 { 156 struct hl_device *hdev = hpriv->hdev; 157 struct hl_info_dram_usage dram_usage = {0}; 158 u32 max_size = args->return_size; 159 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 160 struct asic_fixed_properties *prop = &hdev->asic_prop; 161 u64 dram_kmd_size; 162 163 if ((!max_size) || (!out)) 164 return -EINVAL; 165 166 dram_kmd_size = (prop->dram_user_base_address - 167 prop->dram_base_address); 168 dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) - 169 atomic64_read(&hdev->dram_used_mem); 170 if (hpriv->ctx) 171 dram_usage.ctx_dram_mem = 172 atomic64_read(&hpriv->ctx->dram_phys_mem); 173 174 return copy_to_user(out, &dram_usage, 175 min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0; 176 } 177 178 static int hw_idle(struct hl_device *hdev, struct hl_info_args *args) 179 { 180 struct hl_info_hw_idle hw_idle = {0}; 181 u32 max_size = args->return_size; 182 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 183 184 if ((!max_size) || (!out)) 185 return -EINVAL; 186 187 hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev, 188 hw_idle.busy_engines_mask_ext, 189 HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL); 190 hw_idle.busy_engines_mask = 191 lower_32_bits(hw_idle.busy_engines_mask_ext[0]); 192 193 return copy_to_user(out, &hw_idle, 194 min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0; 195 } 196 197 static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args) 198 { 199 struct hl_debug_params *params; 200 void *input = NULL, *output = NULL; 201 int rc; 202 203 params = kzalloc(sizeof(*params), GFP_KERNEL); 204 if (!params) 205 return -ENOMEM; 206 207 params->reg_idx = args->reg_idx; 208 params->enable = args->enable; 209 params->op = args->op; 210 211 if (args->input_ptr && args->input_size) { 212 input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL); 213 if (!input) { 214 rc = -ENOMEM; 215 goto out; 216 } 217 218 if (copy_from_user(input, u64_to_user_ptr(args->input_ptr), 219 args->input_size)) { 220 rc = -EFAULT; 221 dev_err(hdev->dev, "failed to copy input debug data\n"); 222 goto out; 223 } 224 225 params->input = input; 226 } 227 228 if (args->output_ptr && args->output_size) { 229 output = kzalloc(args->output_size, GFP_KERNEL); 230 if (!output) { 231 rc = -ENOMEM; 232 goto out; 233 } 234 235 params->output = output; 236 params->output_size = args->output_size; 237 } 238 239 rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params); 240 if (rc) { 241 dev_err(hdev->dev, 242 "debug coresight operation failed %d\n", rc); 243 goto out; 244 } 245 246 if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr, 247 output, args->output_size)) { 248 dev_err(hdev->dev, "copy to user failed in debug ioctl\n"); 249 rc = -EFAULT; 250 goto out; 251 } 252 253 254 out: 255 kfree(params); 256 kfree(output); 257 kfree(input); 258 259 return rc; 260 } 261 262 static int device_utilization(struct hl_device *hdev, struct hl_info_args *args) 263 { 264 struct hl_info_device_utilization device_util = {0}; 265 u32 max_size = args->return_size; 266 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 267 int rc; 268 269 if ((!max_size) || (!out)) 270 return -EINVAL; 271 272 rc = hl_device_utilization(hdev, &device_util.utilization); 273 if (rc) 274 return -EINVAL; 275 276 return copy_to_user(out, &device_util, 277 min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0; 278 } 279 280 static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args) 281 { 282 struct hl_info_clk_rate clk_rate = {0}; 283 u32 max_size = args->return_size; 284 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 285 int rc; 286 287 if ((!max_size) || (!out)) 288 return -EINVAL; 289 290 rc = hl_fw_get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, &clk_rate.max_clk_rate_mhz); 291 if (rc) 292 return rc; 293 294 return copy_to_user(out, &clk_rate, min_t(size_t, max_size, sizeof(clk_rate))) 295 ? -EFAULT : 0; 296 } 297 298 static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args) 299 { 300 struct hl_info_reset_count reset_count = {0}; 301 u32 max_size = args->return_size; 302 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 303 304 if ((!max_size) || (!out)) 305 return -EINVAL; 306 307 reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt; 308 reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt; 309 310 return copy_to_user(out, &reset_count, 311 min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0; 312 } 313 314 static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args) 315 { 316 struct hl_info_time_sync time_sync = {0}; 317 u32 max_size = args->return_size; 318 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 319 320 if ((!max_size) || (!out)) 321 return -EINVAL; 322 323 time_sync.device_time = hdev->asic_funcs->get_device_time(hdev); 324 time_sync.host_time = ktime_get_raw_ns(); 325 time_sync.tsc_time = rdtsc(); 326 327 return copy_to_user(out, &time_sync, 328 min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0; 329 } 330 331 static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 332 { 333 struct hl_device *hdev = hpriv->hdev; 334 struct hl_info_pci_counters pci_counters = {0}; 335 u32 max_size = args->return_size; 336 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 337 int rc; 338 339 if ((!max_size) || (!out)) 340 return -EINVAL; 341 342 rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters); 343 if (rc) 344 return rc; 345 346 return copy_to_user(out, &pci_counters, 347 min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0; 348 } 349 350 static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 351 { 352 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 353 struct hl_device *hdev = hpriv->hdev; 354 struct hl_info_clk_throttle clk_throttle = {0}; 355 ktime_t end_time, zero_time = ktime_set(0, 0); 356 u32 max_size = args->return_size; 357 int i; 358 359 if ((!max_size) || (!out)) 360 return -EINVAL; 361 362 mutex_lock(&hdev->clk_throttling.lock); 363 364 clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason; 365 366 for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) { 367 if (!(hdev->clk_throttling.aggregated_reason & BIT(i))) 368 continue; 369 370 clk_throttle.clk_throttling_timestamp_us[i] = 371 ktime_to_us(hdev->clk_throttling.timestamp[i].start); 372 373 if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time)) 374 end_time = hdev->clk_throttling.timestamp[i].end; 375 else 376 end_time = ktime_get(); 377 378 clk_throttle.clk_throttling_duration_ns[i] = 379 ktime_to_ns(ktime_sub(end_time, 380 hdev->clk_throttling.timestamp[i].start)); 381 382 } 383 mutex_unlock(&hdev->clk_throttling.lock); 384 385 return copy_to_user(out, &clk_throttle, 386 min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0; 387 } 388 389 static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 390 { 391 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 392 struct hl_info_cs_counters cs_counters = {0}; 393 struct hl_device *hdev = hpriv->hdev; 394 struct hl_cs_counters_atomic *cntr; 395 u32 max_size = args->return_size; 396 397 cntr = &hdev->aggregated_cs_counters; 398 399 if ((!max_size) || (!out)) 400 return -EINVAL; 401 402 cs_counters.total_out_of_mem_drop_cnt = 403 atomic64_read(&cntr->out_of_mem_drop_cnt); 404 cs_counters.total_parsing_drop_cnt = 405 atomic64_read(&cntr->parsing_drop_cnt); 406 cs_counters.total_queue_full_drop_cnt = 407 atomic64_read(&cntr->queue_full_drop_cnt); 408 cs_counters.total_device_in_reset_drop_cnt = 409 atomic64_read(&cntr->device_in_reset_drop_cnt); 410 cs_counters.total_max_cs_in_flight_drop_cnt = 411 atomic64_read(&cntr->max_cs_in_flight_drop_cnt); 412 cs_counters.total_validation_drop_cnt = 413 atomic64_read(&cntr->validation_drop_cnt); 414 415 if (hpriv->ctx) { 416 cs_counters.ctx_out_of_mem_drop_cnt = 417 atomic64_read( 418 &hpriv->ctx->cs_counters.out_of_mem_drop_cnt); 419 cs_counters.ctx_parsing_drop_cnt = 420 atomic64_read( 421 &hpriv->ctx->cs_counters.parsing_drop_cnt); 422 cs_counters.ctx_queue_full_drop_cnt = 423 atomic64_read( 424 &hpriv->ctx->cs_counters.queue_full_drop_cnt); 425 cs_counters.ctx_device_in_reset_drop_cnt = 426 atomic64_read( 427 &hpriv->ctx->cs_counters.device_in_reset_drop_cnt); 428 cs_counters.ctx_max_cs_in_flight_drop_cnt = 429 atomic64_read( 430 &hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt); 431 cs_counters.ctx_validation_drop_cnt = 432 atomic64_read( 433 &hpriv->ctx->cs_counters.validation_drop_cnt); 434 } 435 436 return copy_to_user(out, &cs_counters, 437 min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0; 438 } 439 440 static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 441 { 442 struct hl_device *hdev = hpriv->hdev; 443 struct asic_fixed_properties *prop = &hdev->asic_prop; 444 struct hl_info_sync_manager sm_info = {0}; 445 u32 max_size = args->return_size; 446 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 447 448 if ((!max_size) || (!out)) 449 return -EINVAL; 450 451 if (args->dcore_id >= HL_MAX_DCORES) 452 return -EINVAL; 453 454 sm_info.first_available_sync_object = 455 prop->first_available_user_sob[args->dcore_id]; 456 sm_info.first_available_monitor = 457 prop->first_available_user_mon[args->dcore_id]; 458 sm_info.first_available_cq = 459 prop->first_available_cq[args->dcore_id]; 460 461 return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size, 462 sizeof(sm_info))) ? -EFAULT : 0; 463 } 464 465 static int total_energy_consumption_info(struct hl_fpriv *hpriv, 466 struct hl_info_args *args) 467 { 468 struct hl_device *hdev = hpriv->hdev; 469 struct hl_info_energy total_energy = {0}; 470 u32 max_size = args->return_size; 471 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 472 int rc; 473 474 if ((!max_size) || (!out)) 475 return -EINVAL; 476 477 rc = hl_fw_cpucp_total_energy_get(hdev, 478 &total_energy.total_energy_consumption); 479 if (rc) 480 return rc; 481 482 return copy_to_user(out, &total_energy, 483 min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0; 484 } 485 486 static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 487 { 488 struct hl_device *hdev = hpriv->hdev; 489 struct hl_pll_frequency_info freq_info = { {0} }; 490 u32 max_size = args->return_size; 491 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 492 int rc; 493 494 if ((!max_size) || (!out)) 495 return -EINVAL; 496 497 rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output); 498 if (rc) 499 return rc; 500 501 return copy_to_user(out, &freq_info, 502 min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0; 503 } 504 505 static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 506 { 507 struct hl_device *hdev = hpriv->hdev; 508 u32 max_size = args->return_size; 509 struct hl_power_info power_info = {0}; 510 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 511 int rc; 512 513 if ((!max_size) || (!out)) 514 return -EINVAL; 515 516 rc = hl_fw_cpucp_power_get(hdev, &power_info.power); 517 if (rc) 518 return rc; 519 520 return copy_to_user(out, &power_info, 521 min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0; 522 } 523 524 static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 525 { 526 struct hl_device *hdev = hpriv->hdev; 527 u32 max_size = args->return_size; 528 struct hl_open_stats_info open_stats_info = {0}; 529 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 530 531 if ((!max_size) || (!out)) 532 return -EINVAL; 533 534 open_stats_info.last_open_period_ms = jiffies64_to_msecs( 535 hdev->last_open_session_duration_jif); 536 open_stats_info.open_counter = hdev->open_counter; 537 open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active; 538 open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release; 539 540 return copy_to_user(out, &open_stats_info, 541 min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0; 542 } 543 544 static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 545 { 546 struct hl_device *hdev = hpriv->hdev; 547 u32 max_size = args->return_size; 548 u32 pend_rows_num = 0; 549 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 550 int rc; 551 552 if ((!max_size) || (!out)) 553 return -EINVAL; 554 555 rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num); 556 if (rc) 557 return rc; 558 559 return copy_to_user(out, &pend_rows_num, 560 min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0; 561 } 562 563 static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 564 { 565 struct hl_device *hdev = hpriv->hdev; 566 u32 max_size = args->return_size; 567 struct cpucp_hbm_row_info info = {0}; 568 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 569 int rc; 570 571 if ((!max_size) || (!out)) 572 return -EINVAL; 573 574 rc = hl_fw_dram_replaced_row_get(hdev, &info); 575 if (rc) 576 return rc; 577 578 return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; 579 } 580 581 static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 582 { 583 struct hl_info_last_err_open_dev_time info = {0}; 584 struct hl_device *hdev = hpriv->hdev; 585 u32 max_size = args->return_size; 586 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 587 588 if ((!max_size) || (!out)) 589 return -EINVAL; 590 591 info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime); 592 593 return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; 594 } 595 596 static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 597 { 598 struct hl_info_cs_timeout_event info = {0}; 599 struct hl_device *hdev = hpriv->hdev; 600 u32 max_size = args->return_size; 601 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 602 603 if ((!max_size) || (!out)) 604 return -EINVAL; 605 606 info.seq = hdev->captured_err_info.cs_timeout.seq; 607 info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp); 608 609 return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; 610 } 611 612 static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 613 { 614 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 615 struct hl_device *hdev = hpriv->hdev; 616 u32 max_size = args->return_size; 617 struct razwi_info *razwi_info; 618 619 if ((!max_size) || (!out)) 620 return -EINVAL; 621 622 razwi_info = &hdev->captured_err_info.razwi_info; 623 if (!razwi_info->razwi_info_available) 624 return 0; 625 626 return copy_to_user(out, &razwi_info->razwi, 627 min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ? -EFAULT : 0; 628 } 629 630 static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 631 { 632 struct hl_device *hdev = hpriv->hdev; 633 u32 max_size = args->return_size; 634 struct hl_info_undefined_opcode_event info = {0}; 635 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 636 637 if ((!max_size) || (!out)) 638 return -EINVAL; 639 640 info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp); 641 info.engine_id = hdev->captured_err_info.undef_opcode.engine_id; 642 info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr; 643 info.cq_size = hdev->captured_err_info.undef_opcode.cq_size; 644 info.stream_id = hdev->captured_err_info.undef_opcode.stream_id; 645 info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len; 646 memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams, 647 sizeof(info.cb_addr_streams)); 648 649 return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; 650 } 651 652 static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 653 { 654 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 655 struct hl_info_dev_memalloc_page_sizes info = {0}; 656 struct hl_device *hdev = hpriv->hdev; 657 u32 max_size = args->return_size; 658 659 if ((!max_size) || (!out)) 660 return -EINVAL; 661 662 /* 663 * Future ASICs that will support multiple DRAM page sizes will support only "powers of 2" 664 * pages (unlike some of the ASICs before supporting multiple page sizes). 665 * For this reason for all ASICs that not support multiple page size the function will 666 * return an empty bitmask indicating that multiple page sizes is not supported. 667 */ 668 info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask; 669 670 return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0; 671 } 672 673 static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 674 { 675 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 676 struct cpucp_sec_attest_info *sec_attest_info; 677 struct hl_info_sec_attest *info; 678 u32 max_size = args->return_size; 679 int rc; 680 681 if ((!max_size) || (!out)) 682 return -EINVAL; 683 684 sec_attest_info = kmalloc(sizeof(*sec_attest_info), GFP_KERNEL); 685 if (!sec_attest_info) 686 return -ENOMEM; 687 688 info = kmalloc(sizeof(*info), GFP_KERNEL); 689 if (!info) { 690 rc = -ENOMEM; 691 goto free_sec_attest_info; 692 } 693 694 rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce); 695 if (rc) 696 goto free_info; 697 698 info->nonce = le32_to_cpu(sec_attest_info->nonce); 699 info->pcr_quote_len = le16_to_cpu(sec_attest_info->pcr_quote_len); 700 info->pub_data_len = le16_to_cpu(sec_attest_info->pub_data_len); 701 info->certificate_len = le16_to_cpu(sec_attest_info->certificate_len); 702 info->pcr_num_reg = sec_attest_info->pcr_num_reg; 703 info->pcr_reg_len = sec_attest_info->pcr_reg_len; 704 info->quote_sig_len = sec_attest_info->quote_sig_len; 705 memcpy(&info->pcr_data, &sec_attest_info->pcr_data, sizeof(info->pcr_data)); 706 memcpy(&info->pcr_quote, &sec_attest_info->pcr_quote, sizeof(info->pcr_quote)); 707 memcpy(&info->public_data, &sec_attest_info->public_data, sizeof(info->public_data)); 708 memcpy(&info->certificate, &sec_attest_info->certificate, sizeof(info->certificate)); 709 memcpy(&info->quote_sig, &sec_attest_info->quote_sig, sizeof(info->quote_sig)); 710 711 rc = copy_to_user(out, info, 712 min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0; 713 714 free_info: 715 kfree(info); 716 free_sec_attest_info: 717 kfree(sec_attest_info); 718 719 return rc; 720 } 721 722 static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args) 723 { 724 int rc; 725 726 /* check if there is already a registered on that process */ 727 mutex_lock(&hpriv->notifier_event.lock); 728 if (hpriv->notifier_event.eventfd) { 729 mutex_unlock(&hpriv->notifier_event.lock); 730 return -EINVAL; 731 } 732 733 hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd); 734 if (IS_ERR(hpriv->notifier_event.eventfd)) { 735 rc = PTR_ERR(hpriv->notifier_event.eventfd); 736 hpriv->notifier_event.eventfd = NULL; 737 mutex_unlock(&hpriv->notifier_event.lock); 738 return rc; 739 } 740 741 mutex_unlock(&hpriv->notifier_event.lock); 742 return 0; 743 } 744 745 static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args) 746 { 747 mutex_lock(&hpriv->notifier_event.lock); 748 if (!hpriv->notifier_event.eventfd) { 749 mutex_unlock(&hpriv->notifier_event.lock); 750 return -EINVAL; 751 } 752 753 eventfd_ctx_put(hpriv->notifier_event.eventfd); 754 hpriv->notifier_event.eventfd = NULL; 755 mutex_unlock(&hpriv->notifier_event.lock); 756 return 0; 757 } 758 759 static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 760 { 761 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 762 u32 status_buf_size = args->return_size; 763 struct hl_device *hdev = hpriv->hdev; 764 struct engines_data eng_data; 765 int rc; 766 767 if ((status_buf_size < SZ_1K) || (status_buf_size > HL_ENGINES_DATA_MAX_SIZE) || (!out)) 768 return -EINVAL; 769 770 eng_data.actual_size = 0; 771 eng_data.allocated_buf_size = status_buf_size; 772 eng_data.buf = vmalloc(status_buf_size); 773 if (!eng_data.buf) 774 return -ENOMEM; 775 776 hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data); 777 778 if (eng_data.actual_size > eng_data.allocated_buf_size) { 779 dev_err(hdev->dev, 780 "Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n", 781 eng_data.actual_size, status_buf_size); 782 vfree(eng_data.buf); 783 return -ENOMEM; 784 } 785 786 args->user_buffer_actual_size = eng_data.actual_size; 787 rc = copy_to_user(out, eng_data.buf, min_t(size_t, status_buf_size, eng_data.actual_size)) ? 788 -EFAULT : 0; 789 790 vfree(eng_data.buf); 791 792 return rc; 793 } 794 795 static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 796 { 797 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 798 struct hl_device *hdev = hpriv->hdev; 799 u32 max_size = args->return_size; 800 struct page_fault_info *pgf_info; 801 802 if ((!max_size) || (!out)) 803 return -EINVAL; 804 805 pgf_info = &hdev->captured_err_info.page_fault_info; 806 if (!pgf_info->page_fault_info_available) 807 return 0; 808 809 return copy_to_user(out, &pgf_info->page_fault, 810 min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0; 811 } 812 813 static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 814 { 815 void __user *out = (void __user *) (uintptr_t) args->return_pointer; 816 u32 user_buf_size = args->return_size; 817 struct hl_device *hdev = hpriv->hdev; 818 struct page_fault_info *pgf_info; 819 u64 actual_size; 820 821 if (!out) 822 return -EINVAL; 823 824 pgf_info = &hdev->captured_err_info.page_fault_info; 825 if (!pgf_info->page_fault_info_available) 826 return 0; 827 828 args->array_size = pgf_info->num_of_user_mappings; 829 830 actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping); 831 if (user_buf_size < actual_size) 832 return -ENOMEM; 833 834 return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0; 835 } 836 837 static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 838 { 839 void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer; 840 struct hl_device *hdev = hpriv->hdev; 841 u32 user_buf_size = args->return_size; 842 struct hw_err_info *info; 843 int rc; 844 845 if (!user_buf) 846 return -EINVAL; 847 848 info = &hdev->captured_err_info.hw_err; 849 if (!info->event_info_available) 850 return 0; 851 852 if (user_buf_size < sizeof(struct hl_info_hw_err_event)) 853 return -ENOMEM; 854 855 rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event)); 856 return rc ? -EFAULT : 0; 857 } 858 859 static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 860 { 861 void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer; 862 struct hl_device *hdev = hpriv->hdev; 863 u32 user_buf_size = args->return_size; 864 struct fw_err_info *info; 865 int rc; 866 867 if (!user_buf) 868 return -EINVAL; 869 870 info = &hdev->captured_err_info.fw_err; 871 if (!info->event_info_available) 872 return 0; 873 874 if (user_buf_size < sizeof(struct hl_info_fw_err_event)) 875 return -ENOMEM; 876 877 rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event)); 878 return rc ? -EFAULT : 0; 879 } 880 881 static int engine_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args) 882 { 883 void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer; 884 struct hl_device *hdev = hpriv->hdev; 885 u32 user_buf_size = args->return_size; 886 struct engine_err_info *info; 887 int rc; 888 889 if (!user_buf) 890 return -EINVAL; 891 892 info = &hdev->captured_err_info.engine_err; 893 if (!info->event_info_available) 894 return 0; 895 896 if (user_buf_size < sizeof(struct hl_info_engine_err_event)) 897 return -ENOMEM; 898 899 rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_engine_err_event)); 900 return rc ? -EFAULT : 0; 901 } 902 903 static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args) 904 { 905 void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer; 906 u32 size = info_args->return_size; 907 dma_addr_t dma_handle; 908 bool need_input_buff; 909 void *fw_buff; 910 int rc = 0; 911 912 switch (info_args->fw_sub_opcode) { 913 case HL_PASSTHROUGH_VERSIONS: 914 need_input_buff = false; 915 break; 916 default: 917 return -EINVAL; 918 } 919 920 if (size > SZ_1M) { 921 dev_err(hdev->dev, "buffer size cannot exceed 1MB\n"); 922 return -EINVAL; 923 } 924 925 fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle); 926 if (!fw_buff) 927 return -ENOMEM; 928 929 930 if (need_input_buff && copy_from_user(fw_buff, buff, size)) { 931 dev_dbg(hdev->dev, "Failed to copy from user FW buff\n"); 932 rc = -EFAULT; 933 goto free_buff; 934 } 935 936 rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size); 937 if (rc) 938 goto free_buff; 939 940 if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) { 941 dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n"); 942 rc = -EFAULT; 943 } 944 945 free_buff: 946 hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff); 947 948 return rc; 949 } 950 951 static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, 952 struct device *dev) 953 { 954 enum hl_device_status status; 955 struct hl_info_args *args = data; 956 struct hl_device *hdev = hpriv->hdev; 957 int rc; 958 959 if (args->pad) { 960 dev_dbg(hdev->dev, "Padding bytes must be 0\n"); 961 return -EINVAL; 962 } 963 964 /* 965 * Information is returned for the following opcodes even if the device 966 * is disabled or in reset. 967 */ 968 switch (args->op) { 969 case HL_INFO_HW_IP_INFO: 970 return hw_ip_info(hdev, args); 971 972 case HL_INFO_DEVICE_STATUS: 973 return device_status_info(hdev, args); 974 975 case HL_INFO_RESET_COUNT: 976 return get_reset_count(hdev, args); 977 978 case HL_INFO_HW_EVENTS: 979 return hw_events_info(hdev, false, args); 980 981 case HL_INFO_HW_EVENTS_AGGREGATE: 982 return hw_events_info(hdev, true, args); 983 984 case HL_INFO_CS_COUNTERS: 985 return cs_counters_info(hpriv, args); 986 987 case HL_INFO_CLK_THROTTLE_REASON: 988 return clk_throttle_info(hpriv, args); 989 990 case HL_INFO_SYNC_MANAGER: 991 return sync_manager_info(hpriv, args); 992 993 case HL_INFO_OPEN_STATS: 994 return open_stats_info(hpriv, args); 995 996 case HL_INFO_LAST_ERR_OPEN_DEV_TIME: 997 return last_err_open_dev_info(hpriv, args); 998 999 case HL_INFO_CS_TIMEOUT_EVENT: 1000 return cs_timeout_info(hpriv, args); 1001 1002 case HL_INFO_RAZWI_EVENT: 1003 return razwi_info(hpriv, args); 1004 1005 case HL_INFO_UNDEFINED_OPCODE_EVENT: 1006 return undefined_opcode_info(hpriv, args); 1007 1008 case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES: 1009 return dev_mem_alloc_page_sizes_info(hpriv, args); 1010 1011 case HL_INFO_GET_EVENTS: 1012 return events_info(hpriv, args); 1013 1014 case HL_INFO_PAGE_FAULT_EVENT: 1015 return page_fault_info(hpriv, args); 1016 1017 case HL_INFO_USER_MAPPINGS: 1018 return user_mappings_info(hpriv, args); 1019 1020 case HL_INFO_UNREGISTER_EVENTFD: 1021 return eventfd_unregister(hpriv, args); 1022 1023 case HL_INFO_HW_ERR_EVENT: 1024 return hw_err_info(hpriv, args); 1025 1026 case HL_INFO_FW_ERR_EVENT: 1027 return fw_err_info(hpriv, args); 1028 1029 case HL_INFO_USER_ENGINE_ERR_EVENT: 1030 return engine_err_info(hpriv, args); 1031 1032 case HL_INFO_DRAM_USAGE: 1033 return dram_usage_info(hpriv, args); 1034 default: 1035 break; 1036 } 1037 1038 if (!hl_device_operational(hdev, &status)) { 1039 dev_dbg_ratelimited(dev, 1040 "Device is %s. Can't execute INFO IOCTL\n", 1041 hdev->status[status]); 1042 return -EBUSY; 1043 } 1044 1045 switch (args->op) { 1046 case HL_INFO_HW_IDLE: 1047 rc = hw_idle(hdev, args); 1048 break; 1049 1050 case HL_INFO_DEVICE_UTILIZATION: 1051 rc = device_utilization(hdev, args); 1052 break; 1053 1054 case HL_INFO_CLK_RATE: 1055 rc = get_clk_rate(hdev, args); 1056 break; 1057 1058 case HL_INFO_TIME_SYNC: 1059 return time_sync_info(hdev, args); 1060 1061 case HL_INFO_PCI_COUNTERS: 1062 return pci_counters_info(hpriv, args); 1063 1064 case HL_INFO_TOTAL_ENERGY: 1065 return total_energy_consumption_info(hpriv, args); 1066 1067 case HL_INFO_PLL_FREQUENCY: 1068 return pll_frequency_info(hpriv, args); 1069 1070 case HL_INFO_POWER: 1071 return power_info(hpriv, args); 1072 1073 1074 case HL_INFO_DRAM_REPLACED_ROWS: 1075 return dram_replaced_rows_info(hpriv, args); 1076 1077 case HL_INFO_DRAM_PENDING_ROWS: 1078 return dram_pending_rows_info(hpriv, args); 1079 1080 case HL_INFO_SECURED_ATTESTATION: 1081 return sec_attest_info(hpriv, args); 1082 1083 case HL_INFO_REGISTER_EVENTFD: 1084 return eventfd_register(hpriv, args); 1085 1086 case HL_INFO_ENGINE_STATUS: 1087 return engine_status_info(hpriv, args); 1088 1089 case HL_INFO_FW_GENERIC_REQ: 1090 return send_fw_generic_request(hdev, args); 1091 1092 default: 1093 dev_err(dev, "Invalid request %d\n", args->op); 1094 rc = -EINVAL; 1095 break; 1096 } 1097 1098 return rc; 1099 } 1100 1101 int hl_info_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv) 1102 { 1103 struct hl_fpriv *hpriv = file_priv->driver_priv; 1104 1105 return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev); 1106 } 1107 1108 static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data) 1109 { 1110 struct hl_info_args *args = data; 1111 1112 switch (args->op) { 1113 case HL_INFO_GET_EVENTS: 1114 case HL_INFO_UNREGISTER_EVENTFD: 1115 case HL_INFO_REGISTER_EVENTFD: 1116 return -EOPNOTSUPP; 1117 default: 1118 break; 1119 } 1120 1121 return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl); 1122 } 1123 1124 int hl_debug_ioctl(struct drm_device *ddev, void *data, struct drm_file *file_priv) 1125 { 1126 struct hl_fpriv *hpriv = file_priv->driver_priv; 1127 struct hl_device *hdev = hpriv->hdev; 1128 struct hl_debug_args *args = data; 1129 enum hl_device_status status; 1130 1131 int rc = 0; 1132 1133 if (!hl_device_operational(hdev, &status)) { 1134 dev_dbg_ratelimited(hdev->dev, 1135 "Device is %s. Can't execute DEBUG IOCTL\n", 1136 hdev->status[status]); 1137 return -EBUSY; 1138 } 1139 1140 switch (args->op) { 1141 case HL_DEBUG_OP_ETR: 1142 case HL_DEBUG_OP_ETF: 1143 case HL_DEBUG_OP_STM: 1144 case HL_DEBUG_OP_FUNNEL: 1145 case HL_DEBUG_OP_BMON: 1146 case HL_DEBUG_OP_SPMU: 1147 case HL_DEBUG_OP_TIMESTAMP: 1148 if (!hdev->in_debug) { 1149 dev_err_ratelimited(hdev->dev, 1150 "Rejecting debug configuration request because device not in debug mode\n"); 1151 return -EFAULT; 1152 } 1153 args->input_size = min(args->input_size, hl_debug_struct_size[args->op]); 1154 rc = debug_coresight(hdev, hpriv->ctx, args); 1155 break; 1156 1157 case HL_DEBUG_OP_SET_MODE: 1158 rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable); 1159 break; 1160 1161 default: 1162 dev_err(hdev->dev, "Invalid request %d\n", args->op); 1163 rc = -EINVAL; 1164 break; 1165 } 1166 1167 return rc; 1168 } 1169 1170 #define HL_IOCTL_DEF(ioctl, _func) \ 1171 [_IOC_NR(ioctl) - HL_COMMAND_START] = {.cmd = ioctl, .func = _func} 1172 1173 static const struct hl_ioctl_desc hl_ioctls_control[] = { 1174 HL_IOCTL_DEF(DRM_IOCTL_HL_INFO, hl_info_ioctl_control) 1175 }; 1176 1177 static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long arg, 1178 const struct hl_ioctl_desc *ioctl, struct device *dev) 1179 { 1180 unsigned int nr = _IOC_NR(cmd); 1181 char stack_kdata[128] = {0}; 1182 char *kdata = NULL; 1183 unsigned int usize, asize; 1184 hl_ioctl_t *func; 1185 u32 hl_size; 1186 int retcode; 1187 1188 /* Do not trust userspace, use our own definition */ 1189 func = ioctl->func; 1190 1191 if (unlikely(!func)) { 1192 dev_dbg(dev, "no function\n"); 1193 retcode = -ENOTTY; 1194 goto out_err; 1195 } 1196 1197 hl_size = _IOC_SIZE(ioctl->cmd); 1198 usize = asize = _IOC_SIZE(cmd); 1199 if (hl_size > asize) 1200 asize = hl_size; 1201 1202 cmd = ioctl->cmd; 1203 1204 if (cmd & (IOC_IN | IOC_OUT)) { 1205 if (asize <= sizeof(stack_kdata)) { 1206 kdata = stack_kdata; 1207 } else { 1208 kdata = kzalloc(asize, GFP_KERNEL); 1209 if (!kdata) { 1210 retcode = -ENOMEM; 1211 goto out_err; 1212 } 1213 } 1214 } 1215 1216 if (cmd & IOC_IN) { 1217 if (copy_from_user(kdata, (void __user *)arg, usize)) { 1218 retcode = -EFAULT; 1219 goto out_err; 1220 } 1221 } 1222 1223 retcode = func(hpriv, kdata); 1224 1225 if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize)) 1226 retcode = -EFAULT; 1227 1228 out_err: 1229 if (retcode) { 1230 char task_comm[TASK_COMM_LEN]; 1231 1232 dev_dbg_ratelimited(dev, 1233 "error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n", 1234 task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr); 1235 } 1236 1237 if (kdata != stack_kdata) 1238 kfree(kdata); 1239 1240 return retcode; 1241 } 1242 1243 long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg) 1244 { 1245 struct hl_fpriv *hpriv = filep->private_data; 1246 struct hl_device *hdev = hpriv->hdev; 1247 const struct hl_ioctl_desc *ioctl = NULL; 1248 unsigned int nr = _IOC_NR(cmd); 1249 1250 if (!hdev) { 1251 pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n"); 1252 return -ENODEV; 1253 } 1254 1255 if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) { 1256 ioctl = &hl_ioctls_control[nr - HL_COMMAND_START]; 1257 } else { 1258 char task_comm[TASK_COMM_LEN]; 1259 1260 dev_dbg_ratelimited(hdev->dev_ctrl, 1261 "invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n", 1262 task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr); 1263 return -ENOTTY; 1264 } 1265 1266 return _hl_ioctl(hpriv, cmd, arg, ioctl, hdev->dev_ctrl); 1267 } 1268