xref: /linux/drivers/accel/habanalabs/common/habanalabs_ioctl.c (revision 0526b56cbc3c489642bd6a5fe4b718dea7ef0ee8)
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt)	"habanalabs: " fmt

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

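/*
 * Expected input-structure size per coresight debug operation. FUNNEL and
 * TIMESTAMP take no input payload, so their entries are zero.
 */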
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
	[HL_DEBUG_OP_FUNNEL] = 0,
	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
	[HL_DEBUG_OP_TIMESTAMP] = 0
};

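/*
 * Most INFO handlers below follow the same pattern: validate the user buffer,
 * fill a fixed-size struct, and copy out at most min(return_size, sizeof())
 * bytes, so older userspace built against a smaller struct keeps working.
 */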
static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_status dev_stat = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!size) || (!out))
		return -EINVAL;

	dev_stat.status = hl_device_status(hdev);

	return copy_to_user(out, &dev_stat,
			min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
}

static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size, dram_available_size;

	if ((!size) || (!out))
		return -EINVAL;

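	/* Expose only the memory beyond what the driver reserves for itself */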
	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	hw_ip.dram_base_address =
			hdev->mmu_enable && prop->dram_supports_virtual_memory ?
			prop->dmmu.start_addr : prop->dram_user_base_address;
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF;
	hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask;

	hw_ip.sram_size = prop->sram_size - sram_kmd_size;

	dram_available_size = prop->dram_size - dram_kmd_size;

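	/* With full MMU coverage, report a DRAM-page-aligned size so user
	 * allocations map cleanly.
	 */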
	if (hdev->mmu_enable == MMU_EN_ALL)
		hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size,
				prop->dram_page_size) * prop->dram_page_size;
	else
		hw_ip.dram_size = dram_available_size;

	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;

	hw_ip.dram_page_size = prop->dram_page_size;
	hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
	hw_ip.num_of_events = prop->num_of_events;

	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	hw_ip.decoder_enabled_mask = prop->decoder_enabled_mask;
	hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
	hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
	hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
	hw_ip.tpc_interrupt_id = prop->tpc_interrupt_id;

	hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
	hw_ip.server_type = prop->server_type;
	hw_ip.security_enabled = prop->fw_security_enabled;
	hw_ip.revision_id = hdev->pdev->revision;
	hw_ip.rotator_enabled_mask = prop->rotator_enabled_mask;
	hw_ip.engine_core_interrupt_reg_addr = prop->engine_core_interrupt_reg_addr;
	hw_ip.reserved_dram_size = dram_kmd_size;

	return copy_to_user(out, &hw_ip,
		min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}

static int hw_events_info(struct hl_device *hdev, bool aggregate,
			struct hl_info_args *args)
{
	u32 size, max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	void *arr;

	if ((!max_size) || (!out))
		return -EINVAL;

	arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
	if (!arr) {
		dev_err(hdev->dev, "Events info not supported\n");
		return -EOPNOTSUPP;
	}

	return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}

static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	u32 max_size = args->return_size;
	u64 events_mask;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((max_size < sizeof(u64)) || (!out))
		return -EINVAL;

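	/* Read-and-clear under the notifier lock so a concurrent event
	 * delivery is not lost between the read and the reset.
	 */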
	mutex_lock(&hpriv->notifier_event.lock);
	events_mask = hpriv->notifier_event.events_mask;
	hpriv->notifier_event.events_mask = 0;
	mutex_unlock(&hpriv->notifier_event.lock);

	return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
}

static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_dram_usage dram_usage = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_kmd_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);
	dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
					atomic64_read(&hdev->dram_used_mem);
	if (hpriv->ctx)
		dram_usage.ctx_dram_mem =
			atomic64_read(&hpriv->ctx->dram_phys_mem);

	return copy_to_user(out, &dram_usage,
		min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
}

static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_idle hw_idle = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
					hw_idle.busy_engines_mask_ext,
					HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
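	/* Legacy field: mirrors only the low 32 bits of the first word of
	 * the extended mask.
	 */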
	hw_idle.busy_engines_mask =
			lower_32_bits(hw_idle.busy_engines_mask_ext[0]);

	return copy_to_user(out, &hw_idle,
		min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}

static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL, *output = NULL;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->reg_idx = args->reg_idx;
	params->enable = args->enable;
	params->op = args->op;

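	/* args->op was validated by hl_debug_ioctl(), which also clamped
	 * input_size to hl_debug_struct_size[op].
	 */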
	if (args->input_ptr && args->input_size) {
		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			rc = -EFAULT;
			dev_err(hdev->dev, "failed to copy input debug data\n");
			goto out;
		}

		params->input = input;
	}

	if (args->output_ptr && args->output_size) {
		output = kzalloc(args->output_size, GFP_KERNEL);
		if (!output) {
			rc = -ENOMEM;
			goto out;
		}

		params->output = output;
		params->output_size = args->output_size;
	}

	rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
	if (rc) {
		dev_err(hdev->dev,
			"debug coresight operation failed %d\n", rc);
		goto out;
	}

	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
					output, args->output_size)) {
		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
		rc = -EFAULT;
		goto out;
	}

out:
	kfree(params);
	kfree(output);
	kfree(input);

	return rc;
}

static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_utilization device_util = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_device_utilization(hdev, &device_util.utilization);
	if (rc)
		return -EINVAL;

	return copy_to_user(out, &device_util,
		min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}

static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_clk_rate clk_rate = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, &clk_rate.max_clk_rate_mhz);
	if (rc)
		return rc;

	return copy_to_user(out, &clk_rate, min_t(size_t, max_size, sizeof(clk_rate)))
										? -EFAULT : 0;
}

static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_reset_count reset_count = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
	reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt;

	return copy_to_user(out, &reset_count,
		min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
}

static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_time_sync time_sync = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
	time_sync.host_time = ktime_get_raw_ns();

	return copy_to_user(out, &time_sync,
		min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}

static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_pci_counters pci_counters = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
	if (rc)
		return rc;

	return copy_to_user(out, &pci_counters,
		min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
}

static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_clk_throttle clk_throttle = {0};
	ktime_t end_time, zero_time = ktime_set(0, 0);
	u32 max_size = args->return_size;
	int i;

	if ((!max_size) || (!out))
		return -EINVAL;

	mutex_lock(&hdev->clk_throttling.lock);

	clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;

	for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
		if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
			continue;

		clk_throttle.clk_throttling_timestamp_us[i] =
			ktime_to_us(hdev->clk_throttling.timestamp[i].start);

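		/* A zero end timestamp means this throttling event is still
		 * in progress, so measure its duration against "now".
		 */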
		if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
			end_time = hdev->clk_throttling.timestamp[i].end;
		else
			end_time = ktime_get();

		clk_throttle.clk_throttling_duration_ns[i] =
			ktime_to_ns(ktime_sub(end_time,
				hdev->clk_throttling.timestamp[i].start));
	}
	mutex_unlock(&hdev->clk_throttling.lock);

	return copy_to_user(out, &clk_throttle,
		min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
}

static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_cs_counters cs_counters = {0};
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_counters_atomic *cntr;
	u32 max_size = args->return_size;

	cntr = &hdev->aggregated_cs_counters;

	if ((!max_size) || (!out))
		return -EINVAL;

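	/* total_* counters aggregate over the device lifetime; the ctx_*
	 * counters below cover only the caller's open context.
	 */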
	cs_counters.total_out_of_mem_drop_cnt =
			atomic64_read(&cntr->out_of_mem_drop_cnt);
	cs_counters.total_parsing_drop_cnt =
			atomic64_read(&cntr->parsing_drop_cnt);
	cs_counters.total_queue_full_drop_cnt =
			atomic64_read(&cntr->queue_full_drop_cnt);
	cs_counters.total_device_in_reset_drop_cnt =
			atomic64_read(&cntr->device_in_reset_drop_cnt);
	cs_counters.total_max_cs_in_flight_drop_cnt =
			atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
	cs_counters.total_validation_drop_cnt =
			atomic64_read(&cntr->validation_drop_cnt);

	if (hpriv->ctx) {
		cs_counters.ctx_out_of_mem_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
		cs_counters.ctx_parsing_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.parsing_drop_cnt);
		cs_counters.ctx_queue_full_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.queue_full_drop_cnt);
		cs_counters.ctx_device_in_reset_drop_cnt =
				atomic64_read(
			&hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
		cs_counters.ctx_max_cs_in_flight_drop_cnt =
				atomic64_read(
			&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
		cs_counters.ctx_validation_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.validation_drop_cnt);
	}

	return copy_to_user(out, &cs_counters,
		min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}

static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_info_sync_manager sm_info = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	if (args->dcore_id >= HL_MAX_DCORES)
		return -EINVAL;

	sm_info.first_available_sync_object =
			prop->first_available_user_sob[args->dcore_id];
	sm_info.first_available_monitor =
			prop->first_available_user_mon[args->dcore_id];
	sm_info.first_available_cq =
			prop->first_available_cq[args->dcore_id];

	return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
			sizeof(sm_info))) ? -EFAULT : 0;
}

static int total_energy_consumption_info(struct hl_fpriv *hpriv,
			struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_energy total_energy = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_total_energy_get(hdev,
			&total_energy.total_energy_consumption);
	if (rc)
		return rc;

	return copy_to_user(out, &total_energy,
		min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
}

static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_pll_frequency_info freq_info = { {0} };
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
	if (rc)
		return rc;

	return copy_to_user(out, &freq_info,
		min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
}

static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_power_info power_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
	if (rc)
		return rc;

	return copy_to_user(out, &power_info,
		min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
}

static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_open_stats_info open_stats_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	open_stats_info.last_open_period_ms = jiffies64_to_msecs(
		hdev->last_open_session_duration_jif);
	open_stats_info.open_counter = hdev->open_counter;
	open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
	open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;

	return copy_to_user(out, &open_stats_info,
		min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
}

static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	u32 pend_rows_num = 0;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
	if (rc)
		return rc;

	return copy_to_user(out, &pend_rows_num,
			min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0;
}

static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct cpucp_hbm_row_info info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_replaced_row_get(hdev, &info);
	if (rc)
		return rc;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_last_err_open_dev_time info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_cs_timeout_event info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.seq = hdev->captured_err_info.cs_timeout.seq;
	info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct razwi_info *razwi_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	razwi_info = &hdev->captured_err_info.razwi_info;
	if (!razwi_info->razwi_info_available)
		return 0;

	return copy_to_user(out, &razwi_info->razwi,
			min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ? -EFAULT : 0;
}

static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_info_undefined_opcode_event info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
	info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
	info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
	info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
	info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
	info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
	memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
			sizeof(info.cb_addr_streams));

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_dev_memalloc_page_sizes info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	/*
	 * Future ASICs that support multiple DRAM page sizes will support only
	 * power-of-2 pages (unlike some earlier ASICs with multiple page-size
	 * support). For ASICs that do not support multiple page sizes, this
	 * function therefore returns an empty bitmask, indicating that
	 * multiple page sizes are not supported.
	 */
	info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct cpucp_sec_attest_info *sec_attest_info;
	struct hl_info_sec_attest *info;
	u32 max_size = args->return_size;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	sec_attest_info = kmalloc(sizeof(*sec_attest_info), GFP_KERNEL);
	if (!sec_attest_info)
		return -ENOMEM;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto free_sec_attest_info;
	}

	rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce);
	if (rc)
		goto free_info;

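	/* CpuCP reports multi-byte scalars in little-endian; convert before
	 * exposing them to userspace.
	 */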
	info->nonce = le32_to_cpu(sec_attest_info->nonce);
	info->pcr_quote_len = le16_to_cpu(sec_attest_info->pcr_quote_len);
	info->pub_data_len = le16_to_cpu(sec_attest_info->pub_data_len);
	info->certificate_len = le16_to_cpu(sec_attest_info->certificate_len);
	info->pcr_num_reg = sec_attest_info->pcr_num_reg;
	info->pcr_reg_len = sec_attest_info->pcr_reg_len;
	info->quote_sig_len = sec_attest_info->quote_sig_len;
	memcpy(&info->pcr_data, &sec_attest_info->pcr_data, sizeof(info->pcr_data));
	memcpy(&info->pcr_quote, &sec_attest_info->pcr_quote, sizeof(info->pcr_quote));
	memcpy(&info->public_data, &sec_attest_info->public_data, sizeof(info->public_data));
	memcpy(&info->certificate, &sec_attest_info->certificate, sizeof(info->certificate));
	memcpy(&info->quote_sig, &sec_attest_info->quote_sig, sizeof(info->quote_sig));

	rc = copy_to_user(out, info,
				min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0;

free_info:
	kfree(info);
free_sec_attest_info:
	kfree(sec_attest_info);

	return rc;
}

static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	int rc;

	/* check if an eventfd is already registered for this process */
	mutex_lock(&hpriv->notifier_event.lock);
	if (hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
	if (IS_ERR(hpriv->notifier_event.eventfd)) {
		rc = PTR_ERR(hpriv->notifier_event.eventfd);
		hpriv->notifier_event.eventfd = NULL;
		mutex_unlock(&hpriv->notifier_event.lock);
		return rc;
	}

	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	mutex_lock(&hpriv->notifier_event.lock);
	if (!hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	eventfd_ctx_put(hpriv->notifier_event.eventfd);
	hpriv->notifier_event.eventfd = NULL;
	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 status_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct engines_data eng_data;
	int rc;

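	/* The status text length is not known in advance; require a buffer
	 * between 1 KiB and HL_ENGINES_DATA_MAX_SIZE.
	 */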
	if ((status_buf_size < SZ_1K) || (status_buf_size > HL_ENGINES_DATA_MAX_SIZE) || (!out))
		return -EINVAL;

	eng_data.actual_size = 0;
	eng_data.allocated_buf_size = status_buf_size;
	eng_data.buf = vmalloc(status_buf_size);
	if (!eng_data.buf)
		return -ENOMEM;

	hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);

	if (eng_data.actual_size > eng_data.allocated_buf_size) {
		dev_err(hdev->dev,
			"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
			eng_data.actual_size, status_buf_size);
		vfree(eng_data.buf);
		return -ENOMEM;
	}

	args->user_buffer_actual_size = eng_data.actual_size;
	rc = copy_to_user(out, eng_data.buf, min_t(size_t, status_buf_size, eng_data.actual_size)) ?
				-EFAULT : 0;

	vfree(eng_data.buf);

	return rc;
}

static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct page_fault_info *pgf_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

	return copy_to_user(out, &pgf_info->page_fault,
			min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0;
}

static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 user_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct page_fault_info *pgf_info;
	u64 actual_size;

	if (!out)
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

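	/* Always report the required array size so userspace can retry with
	 * a large enough buffer.
	 */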
	args->array_size = pgf_info->num_of_user_mappings;

	actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
	if (user_buf_size < actual_size)
		return -ENOMEM;

	return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
}

static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct hw_err_info *info;
	int rc;

	if ((!user_buf_size) || (!user_buf))
		return -EINVAL;

	if (user_buf_size < sizeof(struct hl_info_hw_err_event))
		return -ENOMEM;

	info = &hdev->captured_err_info.hw_err;
	if (!info->event_info_available)
		return -ENOENT;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event));
	return rc ? -EFAULT : 0;
}

static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct fw_err_info *info;
	int rc;

	if ((!user_buf_size) || (!user_buf))
		return -EINVAL;

	if (user_buf_size < sizeof(struct hl_info_fw_err_event))
		return -ENOMEM;

	info = &hdev->captured_err_info.fw_err;
	if (!info->event_info_available)
		return -ENOENT;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event));
	return rc ? -EFAULT : 0;
}

static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
{
	void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
	u32 size = info_args->return_size;
	dma_addr_t dma_handle;
	bool need_input_buff;
	void *fw_buff;
	int rc = 0;

	switch (info_args->fw_sub_opcode) {
	case HL_PASSTHROUGH_VERSIONS:
		need_input_buff = false;
		break;
	default:
		return -EINVAL;
	}

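	/* The request is staged through the CPU-accessible DMA pool, so cap
	 * the transfer size at 1 MB.
	 */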
	if (size > SZ_1M) {
		dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
		return -EINVAL;
	}

	fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
	if (!fw_buff)
		return -ENOMEM;

	if (need_input_buff && copy_from_user(fw_buff, buff, size)) {
		dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
		rc = -EFAULT;
		goto free_buff;
	}

	rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size);
	if (rc)
		goto free_buff;

	if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) {
		dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
		rc = -EFAULT;
	}

free_buff:
	hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);

	return rc;
}

static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	enum hl_device_status status;
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	int rc;

	if (args->pad) {
		dev_dbg(hdev->dev, "Padding bytes must be 0\n");
		return -EINVAL;
	}

	/*
	 * Information is returned for the following opcodes even if the device
	 * is disabled or in reset.
	 */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);

	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);

	case HL_INFO_RESET_COUNT:
		return get_reset_count(hdev, args);

	case HL_INFO_HW_EVENTS:
		return hw_events_info(hdev, false, args);

	case HL_INFO_HW_EVENTS_AGGREGATE:
		return hw_events_info(hdev, true, args);

	case HL_INFO_CS_COUNTERS:
		return cs_counters_info(hpriv, args);

	case HL_INFO_CLK_THROTTLE_REASON:
		return clk_throttle_info(hpriv, args);

	case HL_INFO_SYNC_MANAGER:
		return sync_manager_info(hpriv, args);

	case HL_INFO_OPEN_STATS:
		return open_stats_info(hpriv, args);

	case HL_INFO_LAST_ERR_OPEN_DEV_TIME:
		return last_err_open_dev_info(hpriv, args);

	case HL_INFO_CS_TIMEOUT_EVENT:
		return cs_timeout_info(hpriv, args);

	case HL_INFO_RAZWI_EVENT:
		return razwi_info(hpriv, args);

	case HL_INFO_UNDEFINED_OPCODE_EVENT:
		return undefined_opcode_info(hpriv, args);

	case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
		return dev_mem_alloc_page_sizes_info(hpriv, args);

	case HL_INFO_GET_EVENTS:
		return events_info(hpriv, args);

	case HL_INFO_PAGE_FAULT_EVENT:
		return page_fault_info(hpriv, args);

	case HL_INFO_USER_MAPPINGS:
		return user_mappings_info(hpriv, args);

	case HL_INFO_UNREGISTER_EVENTFD:
		return eventfd_unregister(hpriv, args);

	case HL_INFO_HW_ERR_EVENT:
		return hw_err_info(hpriv, args);

	case HL_INFO_FW_ERR_EVENT:
		return fw_err_info(hpriv, args);

	case HL_INFO_DRAM_USAGE:
		return dram_usage_info(hpriv, args);
	default:
		break;
	}

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

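	/* The remaining opcodes touch HW/FW state and require an operational
	 * device.
	 */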
	switch (args->op) {
	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;

	case HL_INFO_DEVICE_UTILIZATION:
		rc = device_utilization(hdev, args);
		break;

	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;

	case HL_INFO_TIME_SYNC:
		return time_sync_info(hdev, args);

	case HL_INFO_PCI_COUNTERS:
		return pci_counters_info(hpriv, args);

	case HL_INFO_TOTAL_ENERGY:
		return total_energy_consumption_info(hpriv, args);

	case HL_INFO_PLL_FREQUENCY:
		return pll_frequency_info(hpriv, args);

	case HL_INFO_POWER:
		return power_info(hpriv, args);

	case HL_INFO_DRAM_REPLACED_ROWS:
		return dram_replaced_rows_info(hpriv, args);

	case HL_INFO_DRAM_PENDING_ROWS:
		return dram_pending_rows_info(hpriv, args);

	case HL_INFO_SECURED_ATTESTATION:
		return sec_attest_info(hpriv, args);

	case HL_INFO_REGISTER_EVENTFD:
		return eventfd_register(hpriv, args);

	case HL_INFO_ENGINE_STATUS:
		return engine_status_info(hpriv, args);

	case HL_INFO_FW_GENERIC_REQ:
		return send_fw_generic_request(hdev, args);

	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}

static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}

static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_debug_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;
	int rc = 0;

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(hdev->dev,
			"Device is %s. Can't execute DEBUG IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_DEBUG_OP_ETR:
	case HL_DEBUG_OP_ETF:
	case HL_DEBUG_OP_STM:
	case HL_DEBUG_OP_FUNNEL:
	case HL_DEBUG_OP_BMON:
	case HL_DEBUG_OP_SPMU:
	case HL_DEBUG_OP_TIMESTAMP:
		if (!hdev->in_debug) {
			dev_err_ratelimited(hdev->dev,
				"Rejecting debug configuration request because device not in debug mode\n");
			return -EFAULT;
		}
		args->input_size = min(args->input_size, hl_debug_struct_size[args->op]);
		rc = debug_coresight(hdev, hpriv->ctx, args);
		break;

	case HL_DEBUG_OP_SET_MODE:
		rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
		break;

	default:
		dev_err(hdev->dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}

static const struct hl_ioctl_desc hl_ioctls[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};

static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
};

static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
		const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	struct hl_fpriv *hpriv = filep->private_data;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t *func;
	u32 hl_size;
	int retcode;

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(dev, "no function\n");
		retcode = -ENOTTY;
		goto out_err;
	}

	hl_size = _IOC_SIZE(ioctl->cmd);
	usize = asize = _IOC_SIZE(cmd);
	if (hl_size > asize)
		asize = hl_size;

	cmd = ioctl->cmd;

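	/* Small payloads use the zeroed on-stack buffer; larger ones are
	 * allocated zeroed, so any bytes beyond what userspace passed in
	 * read back as zeros.
	 */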
	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kzalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto out_err;
			}
		}
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			retcode = -EFAULT;
			goto out_err;
		}
	}

	retcode = func(hpriv, kdata);

	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		retcode = -EFAULT;

out_err:
	if (retcode)
		dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			  task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	return retcode;
}

long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if (!hdev) {
		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
		return -ENODEV;
	}

	if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
		ioctl = &hl_ioctls[nr];
	} else {
		dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
			task_pid_nr(current), nr);
		return -ENOTTY;
	}

	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
}

long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if (!hdev) {
		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
		return -ENODEV;
	}

	if (nr == _IOC_NR(HL_IOCTL_INFO)) {
		ioctl = &hl_ioctls_control[nr];
	} else {
		dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
			task_pid_nr(current), nr);
		return -ENOTTY;
	}

	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
}