xref: /linux/drivers/accel/habanalabs/common/device.c (revision 460e462d22542adfafd8a5bc979437df73f1cbf3)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2016-2022 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #define pr_fmt(fmt)			"habanalabs: " fmt
9 
10 #include <uapi/drm/habanalabs_accel.h>
11 #include "habanalabs.h"
12 
13 #include <linux/pci.h>
14 #include <linux/hwmon.h>
15 #include <linux/vmalloc.h>
16 
17 #include <drm/drm_accel.h>
18 #include <drm/drm_drv.h>
19 
20 #include <trace/events/habanalabs.h>
21 
22 #define HL_RESET_DELAY_USEC			10000	/* 10ms */
23 
24 #define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC	30
25 
26 enum dma_alloc_type {
27 	DMA_ALLOC_COHERENT,
28 	DMA_ALLOC_POOL,
29 };
30 
31 #define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
32 
33 /*
34  * hl_set_dram_bar - set the DRAM BAR to allow later access to an address
35  *
36  * @hdev: pointer to habanalabs device structure.
37  * @addr: the address the caller wants to access.
38  * @region: the PCI region.
39  * @new_bar_region_base: the new BAR region base address.
40  *
41  * @return: the old BAR base address on success, U64_MAX for failure.
42  *	    The caller should set it back to the old address after use.
43  *
44  * In case the bar space does not cover the whole address space,
45  * the bar base address should be set to allow access to a given address.
46  * This function can also be called if the BAR doesn't need to be set;
47  * in that case it just won't change the base.
48  */
49 static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region,
50 				u64 *new_bar_region_base)
51 {
52 	struct asic_fixed_properties *prop = &hdev->asic_prop;
53 	u64 bar_base_addr, old_base;
54 
55 	if (is_power_of_2(prop->dram_pci_bar_size))
56 		bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
57 	else
58 		bar_base_addr = DIV_ROUND_DOWN_ULL(addr, prop->dram_pci_bar_size) *
59 				prop->dram_pci_bar_size;
60 
61 	old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
62 
63 	/* in case of success we need to update the new BAR base */
64 	if ((old_base != U64_MAX) && new_bar_region_base)
65 		*new_bar_region_base = bar_base_addr;
66 
67 	return old_base;
68 }
69 
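/*
 * hl_access_sram_dram_region - read/write a value from/to a SRAM or DRAM region
 *
 * @hdev: pointer to habanalabs device structure
 * @addr: the device address to access
 * @val: the value to write from or read to
 * @acc_type: the type of access (read/write 8/32/64)
 * @region_type: the PCI region the address belongs to
 * @set_dram_bar: whether the DRAM BAR should be set to cover the address
 *
 * If the DRAM BAR is moved, it is restored to its previous base before returning.
 */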
70 int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
71 	enum debugfs_access_type acc_type, enum pci_region region_type, bool set_dram_bar)
72 {
73 	struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
74 	u64 old_base = 0, rc, bar_region_base = region->region_base;
75 	void __iomem *acc_addr;
76 
77 	if (set_dram_bar) {
78 		old_base = hl_set_dram_bar(hdev, addr, region, &bar_region_base);
79 		if (old_base == U64_MAX)
80 			return -EIO;
81 	}
82 
83 	acc_addr = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
84 			(addr - bar_region_base);
85 
86 	switch (acc_type) {
87 	case DEBUGFS_READ8:
88 		*val = readb(acc_addr);
89 		break;
90 	case DEBUGFS_WRITE8:
91 		writeb(*val, acc_addr);
92 		break;
93 	case DEBUGFS_READ32:
94 		*val = readl(acc_addr);
95 		break;
96 	case DEBUGFS_WRITE32:
97 		writel(*val, acc_addr);
98 		break;
99 	case DEBUGFS_READ64:
100 		*val = readq(acc_addr);
101 		break;
102 	case DEBUGFS_WRITE64:
103 		writeq(*val, acc_addr);
104 		break;
105 	}
106 
107 	if (set_dram_bar) {
108 		rc = hl_set_dram_bar(hdev, old_base, region, NULL);
109 		if (rc == U64_MAX)
110 			return -EIO;
111 	}
112 
113 	return 0;
114 }
115 
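/*
 * hl_dma_alloc_common - common wrapper for DMA allocations
 *
 * Dispatches to the ASIC coherent-allocation or pool-allocation callback
 * according to @alloc_type, and emits a trace event on successful allocation.
 */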
116 static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
117 					gfp_t flag, enum dma_alloc_type alloc_type,
118 					const char *caller)
119 {
120 	void *ptr = NULL;
121 
122 	switch (alloc_type) {
123 	case DMA_ALLOC_COHERENT:
124 		ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
125 		break;
126 	case DMA_ALLOC_POOL:
127 		ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
128 		break;
129 	}
130 
131 	if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
132 		trace_habanalabs_dma_alloc(hdev->dev, (u64) (uintptr_t) ptr, *dma_handle, size,
133 						caller);
134 
135 	return ptr;
136 }
137 
138 static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
139 					dma_addr_t dma_handle, enum dma_alloc_type alloc_type,
140 					const char *caller)
141 {
142 	/* save the address before freeing, to avoid a use-after-free warning in the trace call */
143 	u64 store_cpu_addr = (u64) (uintptr_t) cpu_addr;
144 
145 	switch (alloc_type) {
146 	case DMA_ALLOC_COHERENT:
147 		hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
148 		break;
149 	case DMA_ALLOC_POOL:
150 		hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
151 		break;
152 	}
153 
154 	trace_habanalabs_dma_free(hdev->dev, store_cpu_addr, dma_handle, size, caller);
155 }
156 
157 void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
158 					gfp_t flag, const char *caller)
159 {
160 	return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT, caller);
161 }
162 
163 void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
164 					dma_addr_t dma_handle, const char *caller)
165 {
166 	hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
167 }
168 
169 void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
170 					dma_addr_t *dma_handle, const char *caller)
171 {
172 	return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL, caller);
173 }
174 
175 void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
176 					const char *caller)
177 {
178 	hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
179 }
180 
181 void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
182 {
183 	return hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
184 }
185 
186 void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
187 {
188 	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr);
189 }
190 
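/*
 * hl_dma_map_sgtable_caller - map a scatter-gather table for DMA
 *
 * Maps the SG table via the ASIC-specific callback and, if tracing is
 * enabled, emits a trace event for every mapped DMA segment.
 */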
191 int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
192 				enum dma_data_direction dir, const char *caller)
193 {
194 	struct asic_fixed_properties *prop = &hdev->asic_prop;
195 	struct scatterlist *sg;
196 	int rc, i;
197 
198 	rc = hdev->asic_funcs->dma_map_sgtable(hdev, sgt, dir);
199 	if (rc)
200 		return rc;
201 
202 	if (!trace_habanalabs_dma_map_page_enabled())
203 		return 0;
204 
205 	for_each_sgtable_dma_sg(sgt, sg, i)
206 		trace_habanalabs_dma_map_page(hdev->dev,
207 				page_to_phys(sg_page(sg)),
208 				sg->dma_address - prop->device_dma_offset_for_host_access,
209 #ifdef CONFIG_NEED_SG_DMA_LENGTH
210 				sg->dma_length,
211 #else
212 				sg->length,
213 #endif
214 				dir, caller);
215 
216 	return 0;
217 }
218 
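/*
 * hl_asic_dma_map_sgtable - map a scatter-gather table on the PCI device
 *
 * After mapping, every DMA address is shifted by the device's DMA offset for
 * host memory access, if such an offset is defined.
 */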
219 int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt,
220 				enum dma_data_direction dir)
221 {
222 	struct asic_fixed_properties *prop = &hdev->asic_prop;
223 	struct scatterlist *sg;
224 	int rc, i;
225 
226 	rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
227 	if (rc)
228 		return rc;
229 
230 	/* Shift to the device's base physical address of host memory if necessary */
231 	if (prop->device_dma_offset_for_host_access)
232 		for_each_sgtable_dma_sg(sgt, sg, i)
233 			sg->dma_address += prop->device_dma_offset_for_host_access;
234 
235 	return 0;
236 }
237 
238 void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
239 					enum dma_data_direction dir, const char *caller)
240 {
241 	struct asic_fixed_properties *prop = &hdev->asic_prop;
242 	struct scatterlist *sg;
243 	int i;
244 
245 	hdev->asic_funcs->dma_unmap_sgtable(hdev, sgt, dir);
246 
247 	if (trace_habanalabs_dma_unmap_page_enabled()) {
248 		for_each_sgtable_dma_sg(sgt, sg, i)
249 			trace_habanalabs_dma_unmap_page(hdev->dev, page_to_phys(sg_page(sg)),
250 					sg->dma_address - prop->device_dma_offset_for_host_access,
251 #ifdef CONFIG_NEED_SG_DMA_LENGTH
252 					sg->dma_length,
253 #else
254 					sg->length,
255 #endif
256 					dir, caller);
257 	}
258 }
259 
260 void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
261 				enum dma_data_direction dir)
262 {
263 	struct asic_fixed_properties *prop = &hdev->asic_prop;
264 	struct scatterlist *sg;
265 	int i;
266 
267 	/* Cancel the device's base physical address of host memory if necessary */
268 	if (prop->device_dma_offset_for_host_access)
269 		for_each_sgtable_dma_sg(sgt, sg, i)
270 			sg->dma_address -= prop->device_dma_offset_for_host_access;
271 
272 	dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);
273 }
274 
275 /*
276  * hl_access_cfg_region - access the config region
277  *
278  * @hdev: pointer to habanalabs device structure
279  * @addr: the address to access
280  * @val: the value to write from or read to
281  * @acc_type: the type of access (read/write 64/32)
282  */
283 int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
284 	enum debugfs_access_type acc_type)
285 {
286 	struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];
287 	u32 val_h, val_l;
288 
289 	if (!IS_ALIGNED(addr, sizeof(u32))) {
290 		dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));
291 		return -EINVAL;
292 	}
293 
294 	switch (acc_type) {
295 	case DEBUGFS_READ32:
296 		*val = RREG32(addr - cfg_region->region_base);
297 		break;
298 	case DEBUGFS_WRITE32:
299 		WREG32(addr - cfg_region->region_base, *val);
300 		break;
301 	case DEBUGFS_READ64:
302 		val_l = RREG32(addr - cfg_region->region_base);
303 		val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);
304 
305 		*val = (((u64) val_h) << 32) | val_l;
306 		break;
307 	case DEBUGFS_WRITE64:
308 		WREG32(addr - cfg_region->region_base, lower_32_bits(*val));
309 		WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));
310 		break;
311 	default:
312 		dev_err(hdev->dev, "access type %d is not supported\n", acc_type);
313 		return -EOPNOTSUPP;
314 	}
315 
316 	return 0;
317 }
318 
319 /*
320  * hl_access_dev_mem - access device memory
321  *
322  * @hdev: pointer to habanalabs device structure
323  * @region_type: the type of the region the address belongs to
324  * @addr: the address to access
325  * @val: the value to write from or read to
326  * @acc_type: the type of access (r/w, 32/64)
327  */
328 int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
329 			u64 addr, u64 *val, enum debugfs_access_type acc_type)
330 {
331 	switch (region_type) {
332 	case PCI_REGION_CFG:
333 		return hl_access_cfg_region(hdev, addr, val, acc_type);
334 	case PCI_REGION_SRAM:
335 	case PCI_REGION_DRAM:
336 		return hl_access_sram_dram_region(hdev, addr, val, acc_type,
337 				region_type, (region_type == PCI_REGION_DRAM));
338 	default:
339 		return -EFAULT;
340 	}
341 
342 	return 0;
343 }
344 
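/*
 * hl_engine_data_sprintf - append a formatted string to the engines data buffer
 *
 * The formatted string is copied only if it fits in the allocated buffer, but
 * the required size is always accumulated in e->actual_size so the caller can
 * learn the exact size of all input strings.
 */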
345 void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)
346 {
347 	va_list args;
348 	int str_size;
349 
350 	va_start(args, fmt);
351 	/* Calculate the formatted string length. vsnprintf() does not count the terminating
352 	 * NUL character, hence increment the result by 1.
353 	 */
354 	str_size = vsnprintf(NULL, 0, fmt, args) + 1;
355 	va_end(args);
356 
357 	if ((e->actual_size + str_size) < e->allocated_buf_size) {
358 		va_start(args, fmt);
359 		vsnprintf(e->buf + e->actual_size, str_size, fmt, args);
360 		va_end(args);
361 	}
362 
363 	/* Need to update the size even when not updating destination buffer to get the exact size
364 	 * of all input strings
365 	 */
366 	e->actual_size += str_size;
367 }
368 
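/*
 * hl_device_status - derive the current status of the device
 *
 * The status is determined from the device-fini, reset, needs-reset, disabled
 * and init-done indications, in that order of precedence.
 */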
369 enum hl_device_status hl_device_status(struct hl_device *hdev)
370 {
371 	enum hl_device_status status;
372 
373 	if (hdev->device_fini_pending) {
374 		status = HL_DEVICE_STATUS_MALFUNCTION;
375 	} else if (hdev->reset_info.in_reset) {
376 		if (hdev->reset_info.in_compute_reset)
377 			status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;
378 		else
379 			status = HL_DEVICE_STATUS_IN_RESET;
380 	} else if (hdev->reset_info.needs_reset) {
381 		status = HL_DEVICE_STATUS_NEEDS_RESET;
382 	} else if (hdev->disabled) {
383 		status = HL_DEVICE_STATUS_MALFUNCTION;
384 	} else if (!hdev->init_done) {
385 		status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
386 	} else {
387 		status = HL_DEVICE_STATUS_OPERATIONAL;
388 	}
389 
390 	return status;
391 }
392 
393 bool hl_device_operational(struct hl_device *hdev,
394 		enum hl_device_status *status)
395 {
396 	enum hl_device_status current_status;
397 
398 	current_status = hl_device_status(hdev);
399 	if (status)
400 		*status = current_status;
401 
402 	switch (current_status) {
403 	case HL_DEVICE_STATUS_MALFUNCTION:
404 	case HL_DEVICE_STATUS_IN_RESET:
405 	case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
406 	case HL_DEVICE_STATUS_NEEDS_RESET:
407 		return false;
408 	case HL_DEVICE_STATUS_OPERATIONAL:
409 	case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
410 	default:
411 		return true;
412 	}
413 }
414 
415 bool hl_ctrl_device_operational(struct hl_device *hdev,
416 		enum hl_device_status *status)
417 {
418 	enum hl_device_status current_status;
419 
420 	current_status = hl_device_status(hdev);
421 	if (status)
422 		*status = current_status;
423 
424 	switch (current_status) {
425 	case HL_DEVICE_STATUS_MALFUNCTION:
426 		return false;
427 	case HL_DEVICE_STATUS_IN_RESET:
428 	case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
429 	case HL_DEVICE_STATUS_NEEDS_RESET:
430 	case HL_DEVICE_STATUS_OPERATIONAL:
431 	case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
432 	default:
433 		return true;
434 	}
435 }
436 
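/*
 * print_idle_status_mask - print the mask of busy engines
 *
 * Prints only as many 64-bit words as needed to include the highest non-zero
 * word of the mask.
 */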
437 static void print_idle_status_mask(struct hl_device *hdev, const char *message,
438 					u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
439 {
440 	if (idle_mask[3])
441 		dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx_%016llx)\n",
442 			message, idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
443 	else if (idle_mask[2])
444 		dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx)\n",
445 			message, idle_mask[2], idle_mask[1], idle_mask[0]);
446 	else if (idle_mask[1])
447 		dev_err(hdev->dev, "%s (mask %#llx_%016llx)\n",
448 			message, idle_mask[1], idle_mask[0]);
449 	else
450 		dev_err(hdev->dev, "%s (mask %#llx)\n", message, idle_mask[0]);
451 }
452 
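/*
 * hpriv_release - release function for a file private data structure
 *
 * Called via kref_put() when the last reference to the FD is dropped.
 * Checks whether the device is idle, removes the user from the device list,
 * and either resets the device or scrubs its memory, as needed.
 */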
453 static void hpriv_release(struct kref *ref)
454 {
455 	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
456 	bool reset_device, device_is_idle = true;
457 	struct hl_fpriv *hpriv;
458 	struct hl_device *hdev;
459 
460 	hpriv = container_of(ref, struct hl_fpriv, refcount);
461 
462 	hdev = hpriv->hdev;
463 
464 	hdev->asic_funcs->send_device_activity(hdev, false);
465 
466 	hl_debugfs_remove_file(hpriv);
467 
468 	mutex_destroy(&hpriv->ctx_lock);
469 	mutex_destroy(&hpriv->restore_phase_mutex);
470 
471 	/* There should be no memory buffers at this point and handles IDR can be destroyed */
472 	hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
473 
474 	/* Device should be reset if reset-upon-device-release is enabled, or if there is a pending
475 	 * reset that waits for device release.
476 	 */
477 	reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;
478 
479 	/* Check the device idle status and reset if not idle.
480 	 * Skip it if already in reset, or if device is going to be reset in any case.
481 	 */
482 	if (!hdev->reset_info.in_reset && !reset_device && !hdev->pldm)
483 		device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,
484 							HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
485 	if (!device_is_idle) {
486 		print_idle_status_mask(hdev, "device is not idle after user context is closed",
487 					idle_mask);
488 		reset_device = true;
489 	}
490 
491 	/* We need to remove the user from the list to make sure the reset process won't
492 	 * try to kill the user process. If we got here, it means there are no more
493 	 * driver/device resources that the user process is occupying, so there is
494 	 * no need to kill it.
495 	 *
496 	 * However, we can't set the compute_ctx to NULL at this stage. This is to prevent
497 	 * a race between the release and opening the device again. We don't want to let
498 	 * a user open the device while a reset is about to happen.
499 	 */
500 	mutex_lock(&hdev->fpriv_list_lock);
501 	list_del(&hpriv->dev_node);
502 	mutex_unlock(&hdev->fpriv_list_lock);
503 
504 	put_pid(hpriv->taskpid);
505 
506 	if (reset_device) {
507 		hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
508 	} else {
509 		/* Scrubbing is handled within hl_device_reset(), so here we need to do it directly */
510 		int rc = hdev->asic_funcs->scrub_device_mem(hdev);
511 
512 		if (rc) {
513 			dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);
514 			hl_device_reset(hdev, HL_DRV_RESET_HARD);
515 		}
516 	}
517 
518 	/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
519 	 * thread, we don't care because in_reset is marked, so if a user tries to open
520 	 * the device it will fail on that, even if compute_ctx is false.
521 	 */
522 	mutex_lock(&hdev->fpriv_list_lock);
523 	hdev->is_compute_ctx_active = false;
524 	mutex_unlock(&hdev->fpriv_list_lock);
525 
526 	hdev->compute_ctx_in_release = 0;
527 
528 	/* release the eventfd */
529 	if (hpriv->notifier_event.eventfd)
530 		eventfd_ctx_put(hpriv->notifier_event.eventfd);
531 
532 	mutex_destroy(&hpriv->notifier_event.lock);
533 
534 	kfree(hpriv);
535 }
536 
537 void hl_hpriv_get(struct hl_fpriv *hpriv)
538 {
539 	kref_get(&hpriv->refcount);
540 }
541 
542 int hl_hpriv_put(struct hl_fpriv *hpriv)
543 {
544 	return kref_put(&hpriv->refcount, hpriv_release);
545 }
546 
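/*
 * print_device_in_use_info - report why the device is still considered in use
 *
 * Appends the number of active command submissions and exported dma-buf
 * objects to the given message, or "[unknown reason]" if both are zero.
 */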
547 static void print_device_in_use_info(struct hl_device *hdev, const char *message)
548 {
549 	u32 active_cs_num, dmabuf_export_cnt;
550 	bool unknown_reason = true;
551 	char buf[128];
552 	size_t size;
553 	int offset;
554 
555 	size = sizeof(buf);
556 	offset = 0;
557 
558 	active_cs_num = hl_get_active_cs_num(hdev);
559 	if (active_cs_num) {
560 		unknown_reason = false;
561 		offset += scnprintf(buf + offset, size - offset, " [%u active CS]", active_cs_num);
562 	}
563 
564 	dmabuf_export_cnt = atomic_read(&hdev->dmabuf_export_cnt);
565 	if (dmabuf_export_cnt) {
566 		unknown_reason = false;
567 		offset += scnprintf(buf + offset, size - offset, " [%u exported dma-buf]",
568 					dmabuf_export_cnt);
569 	}
570 
571 	if (unknown_reason)
572 		scnprintf(buf + offset, size - offset, " [unknown reason]");
573 
574 	dev_notice(hdev->dev, "%s%s\n", message, buf);
575 }
576 
577 /*
578  * hl_device_release() - release function for habanalabs device.
579  * @ddev: pointer to DRM device structure.
580  * @file_priv: pointer to DRM file private data structure.
581  *
582  * Called when a process closes a habanalabs device
583  */
584 void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
585 {
586 	struct hl_fpriv *hpriv = file_priv->driver_priv;
587 	struct hl_device *hdev = to_hl_device(ddev);
588 
589 	if (!hdev) {
590 		pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
591 		put_pid(hpriv->taskpid);
592 	}
593 
594 	hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
595 
596 	/* Memory buffers might be still in use at this point and thus the handles IDR destruction
597 	 * is postponed to hpriv_release().
598 	 */
599 	hl_mem_mgr_fini(&hpriv->mem_mgr);
600 
601 	hdev->compute_ctx_in_release = 1;
602 
603 	if (!hl_hpriv_put(hpriv)) {
604 		print_device_in_use_info(hdev, "User process closed FD but device still in use");
605 		hl_device_reset(hdev, HL_DRV_RESET_HARD);
606 	}
607 
608 	hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif;
609 }
610 
611 static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
612 {
613 	struct hl_fpriv *hpriv = filp->private_data;
614 	struct hl_device *hdev = hpriv->hdev;
615 
616 	filp->private_data = NULL;
617 
618 	if (!hdev) {
619 		pr_err("Closing FD after device was removed\n");
620 		goto out;
621 	}
622 
623 	mutex_lock(&hdev->fpriv_ctrl_list_lock);
624 	list_del(&hpriv->dev_node);
625 	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
626 out:
627 	put_pid(hpriv->taskpid);
628 
629 	kfree(hpriv);
630 
631 	return 0;
632 }
633 
634 static int __hl_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
635 {
636 	struct hl_device *hdev = hpriv->hdev;
637 	unsigned long vm_pgoff;
638 
639 	if (!hdev) {
640 		pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");
641 		return -ENODEV;
642 	}
643 
644 	vm_pgoff = vma->vm_pgoff;
645 
646 	switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
647 	case HL_MMAP_TYPE_BLOCK:
648 		vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
649 		return hl_hw_block_mmap(hpriv, vma);
650 
651 	case HL_MMAP_TYPE_CB:
652 	case HL_MMAP_TYPE_TS_BUFF:
653 		return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
654 	}
655 	return -EINVAL;
656 }
657 
658 /*
659  * hl_mmap - mmap function for habanalabs device
660  *
661  * @*filp: pointer to file structure
662  * @*vma: pointer to vm_area_struct of the process
663  *
664  * Called when a process does an mmap on a habanalabs device. Call the relevant mmap
665  * function at the end of the common code.
666  */
667 int hl_mmap(struct file *filp, struct vm_area_struct *vma)
668 {
669 	struct drm_file *file_priv = filp->private_data;
670 	struct hl_fpriv *hpriv = file_priv->driver_priv;
671 
672 	return __hl_mmap(hpriv, vma);
673 }
674 
675 static const struct file_operations hl_ctrl_ops = {
676 	.owner = THIS_MODULE,
677 	.open = hl_device_open_ctrl,
678 	.release = hl_device_release_ctrl,
679 	.unlocked_ioctl = hl_ioctl_control,
680 	.compat_ioctl = hl_ioctl_control
681 };
682 
683 static void device_release_func(struct device *dev)
684 {
685 	kfree(dev);
686 }
687 
688 /*
689  * device_init_cdev - Initialize cdev and device for habanalabs device
690  *
691  * @hdev: pointer to habanalabs device structure
692  * @class: pointer to the class object of the device
693  * @minor: minor number of the specific device
694  * @fops: file operations to install for this device
695  * @name: name of the device as it will appear in the filesystem
696  * @cdev: pointer to the char device object that will be initialized
697  * @dev: pointer to the device object that will be initialized
698  *
699  * Initialize a cdev and a Linux device for habanalabs's device.
700  */
701 static int device_init_cdev(struct hl_device *hdev, const struct class *class,
702 				int minor, const struct file_operations *fops,
703 				char *name, struct cdev *cdev,
704 				struct device **dev)
705 {
706 	cdev_init(cdev, fops);
707 	cdev->owner = THIS_MODULE;
708 
709 	*dev = kzalloc(sizeof(**dev), GFP_KERNEL);
710 	if (!*dev)
711 		return -ENOMEM;
712 
713 	device_initialize(*dev);
714 	(*dev)->devt = MKDEV(hdev->major, minor);
715 	(*dev)->class = class;
716 	(*dev)->release = device_release_func;
717 	dev_set_drvdata(*dev, hdev);
718 	dev_set_name(*dev, "%s", name);
719 
720 	return 0;
721 }
722 
723 static int cdev_sysfs_debugfs_add(struct hl_device *hdev)
724 {
725 	const struct class *accel_class = hdev->drm.accel->kdev->class;
726 	char name[32];
727 	int rc;
728 
729 	hdev->cdev_idx = hdev->drm.accel->index;
730 
731 	/* Initialize cdev and device structures for the control device */
732 	snprintf(name, sizeof(name), "accel_controlD%d", hdev->cdev_idx);
733 	rc = device_init_cdev(hdev, accel_class, hdev->cdev_idx, &hl_ctrl_ops, name,
734 				&hdev->cdev_ctrl, &hdev->dev_ctrl);
735 	if (rc)
736 		return rc;
737 
738 	rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
739 	if (rc) {
740 		dev_err(hdev->dev_ctrl,
741 			"failed to add an accel control char device to the system\n");
742 		goto free_ctrl_device;
743 	}
744 
745 	rc = hl_sysfs_init(hdev);
746 	if (rc) {
747 		dev_err(hdev->dev, "failed to initialize sysfs\n");
748 		goto delete_ctrl_cdev_device;
749 	}
750 
751 	hl_debugfs_add_device(hdev);
752 
753 	hdev->cdev_sysfs_debugfs_created = true;
754 
755 	return 0;
756 
757 delete_ctrl_cdev_device:
758 	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
759 free_ctrl_device:
760 	put_device(hdev->dev_ctrl);
761 	return rc;
762 }
763 
764 static void cdev_sysfs_debugfs_remove(struct hl_device *hdev)
765 {
766 	if (!hdev->cdev_sysfs_debugfs_created)
767 		return;
768 
769 	hl_sysfs_fini(hdev);
770 
771 	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
772 	put_device(hdev->dev_ctrl);
773 }
774 
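/*
 * device_hard_reset_pending - reset work executed from the dedicated reset thread
 *
 * Performs the reset with the flags stored in the reset work. If the reset
 * returns -EBUSY (open processes could not be killed yet) and device fini is
 * not pending, the work is rescheduled.
 */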
775 static void device_hard_reset_pending(struct work_struct *work)
776 {
777 	struct hl_device_reset_work *device_reset_work =
778 		container_of(work, struct hl_device_reset_work, reset_work.work);
779 	struct hl_device *hdev = device_reset_work->hdev;
780 	u32 flags;
781 	int rc;
782 
783 	flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;
784 
785 	rc = hl_device_reset(hdev, flags);
786 
787 	if ((rc == -EBUSY) && !hdev->device_fini_pending) {
788 		struct hl_ctx *ctx = hl_get_compute_ctx(hdev);
789 
790 		if (ctx) {
791 			/* The read refcount value should be decreased by one, because the read is
792 			 * protected with hl_get_compute_ctx().
793 			 */
794 			dev_info(hdev->dev,
795 				"Could not reset device (compute_ctx refcount %u). will try again in %u seconds",
796 				kref_read(&ctx->refcount) - 1, HL_PENDING_RESET_PER_SEC);
797 			hl_ctx_put(ctx);
798 		} else {
799 			dev_info(hdev->dev, "Could not reset device. will try again in %u seconds",
800 				HL_PENDING_RESET_PER_SEC);
801 		}
802 
803 		queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
804 					msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
805 	}
806 }
807 
808 static void device_release_watchdog_func(struct work_struct *work)
809 {
810 	struct hl_device_reset_work *watchdog_work =
811 			container_of(work, struct hl_device_reset_work, reset_work.work);
812 	struct hl_device *hdev = watchdog_work->hdev;
813 	u32 flags;
814 
815 	dev_dbg(hdev->dev, "Device wasn't released in time. Initiate hard-reset.\n");
816 
817 	flags = watchdog_work->flags | HL_DRV_RESET_HARD | HL_DRV_RESET_FROM_WD_THR;
818 
819 	hl_device_reset(hdev, flags);
820 }
821 
822 /*
823  * device_early_init - do some early initialization for the habanalabs device
824  *
825  * @hdev: pointer to habanalabs device structure
826  *
827  * Install the relevant function pointers and call the early_init function,
828  * if such a function exists
829  */
830 static int device_early_init(struct hl_device *hdev)
831 {
832 	int i, rc;
833 	char workq_name[32];
834 
835 	switch (hdev->asic_type) {
836 	case ASIC_GOYA:
837 		goya_set_asic_funcs(hdev);
838 		strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
839 		break;
840 	case ASIC_GAUDI:
841 		gaudi_set_asic_funcs(hdev);
842 		strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
843 		break;
844 	case ASIC_GAUDI_SEC:
845 		gaudi_set_asic_funcs(hdev);
846 		strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
847 		break;
848 	case ASIC_GAUDI2:
849 		gaudi2_set_asic_funcs(hdev);
850 		strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));
851 		break;
852 	case ASIC_GAUDI2B:
853 		gaudi2_set_asic_funcs(hdev);
854 		strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name));
855 		break;
857 	default:
858 		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
859 			hdev->asic_type);
860 		return -EINVAL;
861 	}
862 
863 	rc = hdev->asic_funcs->early_init(hdev);
864 	if (rc)
865 		return rc;
866 
867 	rc = hl_asid_init(hdev);
868 	if (rc)
869 		goto early_fini;
870 
871 	if (hdev->asic_prop.completion_queues_count) {
872 		hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
873 				sizeof(struct workqueue_struct *),
874 				GFP_KERNEL);
875 		if (!hdev->cq_wq) {
876 			rc = -ENOMEM;
877 			goto asid_fini;
878 		}
879 	}
880 
881 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
882 		snprintf(workq_name, 32, "hl%u-free-jobs-%u", hdev->cdev_idx, (u32) i);
883 		hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
884 		if (hdev->cq_wq[i] == NULL) {
885 			dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
886 			rc = -ENOMEM;
887 			goto free_cq_wq;
888 		}
889 	}
890 
891 	snprintf(workq_name, 32, "hl%u-events", hdev->cdev_idx);
892 	hdev->eq_wq = create_singlethread_workqueue(workq_name);
893 	if (hdev->eq_wq == NULL) {
894 		dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
895 		rc = -ENOMEM;
896 		goto free_cq_wq;
897 	}
898 
899 	snprintf(workq_name, 32, "hl%u-cs-completions", hdev->cdev_idx);
900 	hdev->cs_cmplt_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
901 	if (!hdev->cs_cmplt_wq) {
902 		dev_err(hdev->dev,
903 			"Failed to allocate CS completions workqueue\n");
904 		rc = -ENOMEM;
905 		goto free_eq_wq;
906 	}
907 
908 	snprintf(workq_name, 32, "hl%u-ts-free-obj", hdev->cdev_idx);
909 	hdev->ts_free_obj_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
910 	if (!hdev->ts_free_obj_wq) {
911 		dev_err(hdev->dev,
912 			"Failed to allocate Timestamp registration free workqueue\n");
913 		rc = -ENOMEM;
914 		goto free_cs_cmplt_wq;
915 	}
916 
917 	snprintf(workq_name, 32, "hl%u-prefetch", hdev->cdev_idx);
918 	hdev->prefetch_wq = alloc_workqueue(workq_name, WQ_UNBOUND, 0);
919 	if (!hdev->prefetch_wq) {
920 		dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
921 		rc = -ENOMEM;
922 		goto free_ts_free_wq;
923 	}
924 
925 	hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), GFP_KERNEL);
926 	if (!hdev->hl_chip_info) {
927 		rc = -ENOMEM;
928 		goto free_prefetch_wq;
929 	}
930 
931 	rc = hl_mmu_if_set_funcs(hdev);
932 	if (rc)
933 		goto free_chip_info;
934 
935 	hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
936 
937 	snprintf(workq_name, 32, "hl%u_device_reset", hdev->cdev_idx);
938 	hdev->reset_wq = create_singlethread_workqueue(workq_name);
939 	if (!hdev->reset_wq) {
940 		rc = -ENOMEM;
941 		dev_err(hdev->dev, "Failed to create device reset WQ\n");
942 		goto free_cb_mgr;
943 	}
944 
945 	INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);
946 	hdev->device_reset_work.hdev = hdev;
947 	hdev->device_fini_pending = 0;
948 
949 	INIT_DELAYED_WORK(&hdev->device_release_watchdog_work.reset_work,
950 				device_release_watchdog_func);
951 	hdev->device_release_watchdog_work.hdev = hdev;
952 
953 	mutex_init(&hdev->send_cpu_message_lock);
954 	mutex_init(&hdev->debug_lock);
955 	INIT_LIST_HEAD(&hdev->cs_mirror_list);
956 	spin_lock_init(&hdev->cs_mirror_lock);
957 	spin_lock_init(&hdev->reset_info.lock);
958 	INIT_LIST_HEAD(&hdev->fpriv_list);
959 	INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
960 	mutex_init(&hdev->fpriv_list_lock);
961 	mutex_init(&hdev->fpriv_ctrl_list_lock);
962 	mutex_init(&hdev->clk_throttling.lock);
963 
964 	return 0;
965 
966 free_cb_mgr:
967 	hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
968 	hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
969 free_chip_info:
970 	kfree(hdev->hl_chip_info);
971 free_prefetch_wq:
972 	destroy_workqueue(hdev->prefetch_wq);
973 free_ts_free_wq:
974 	destroy_workqueue(hdev->ts_free_obj_wq);
975 free_cs_cmplt_wq:
976 	destroy_workqueue(hdev->cs_cmplt_wq);
977 free_eq_wq:
978 	destroy_workqueue(hdev->eq_wq);
979 free_cq_wq:
980 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
981 		if (hdev->cq_wq[i])
982 			destroy_workqueue(hdev->cq_wq[i]);
983 	kfree(hdev->cq_wq);
984 asid_fini:
985 	hl_asid_fini(hdev);
986 early_fini:
987 	if (hdev->asic_funcs->early_fini)
988 		hdev->asic_funcs->early_fini(hdev);
989 
990 	return rc;
991 }
992 
993 /*
994  * device_early_fini - finalize all that was done in device_early_init
995  *
996  * @hdev: pointer to habanalabs device structure
997  *
998  */
999 static void device_early_fini(struct hl_device *hdev)
1000 {
1001 	int i;
1002 
1003 	mutex_destroy(&hdev->debug_lock);
1004 	mutex_destroy(&hdev->send_cpu_message_lock);
1005 
1006 	mutex_destroy(&hdev->fpriv_list_lock);
1007 	mutex_destroy(&hdev->fpriv_ctrl_list_lock);
1008 
1009 	mutex_destroy(&hdev->clk_throttling.lock);
1010 
1011 	hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
1012 	hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
1013 
1014 	kfree(hdev->hl_chip_info);
1015 
1016 	destroy_workqueue(hdev->prefetch_wq);
1017 	destroy_workqueue(hdev->ts_free_obj_wq);
1018 	destroy_workqueue(hdev->cs_cmplt_wq);
1019 	destroy_workqueue(hdev->eq_wq);
1020 	destroy_workqueue(hdev->reset_wq);
1021 
1022 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1023 		destroy_workqueue(hdev->cq_wq[i]);
1024 	kfree(hdev->cq_wq);
1025 
1026 	hl_asid_fini(hdev);
1027 
1028 	if (hdev->asic_funcs->early_fini)
1029 		hdev->asic_funcs->early_fini(hdev);
1030 }
1031 
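/*
 * is_pci_link_healthy - check that the PCI link is functional
 *
 * Reads the vendor ID from PCI config space and verifies it matches the
 * Habanalabs vendor ID.
 */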
1032 static bool is_pci_link_healthy(struct hl_device *hdev)
1033 {
1034 	u16 vendor_id;
1035 
1036 	if (!hdev->pdev)
1037 		return false;
1038 
1039 	pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id);
1040 
1041 	return (vendor_id == PCI_VENDOR_ID_HABANALABS);
1042 }
1043 
1044 static void hl_device_eq_heartbeat(struct hl_device *hdev)
1045 {
1046 	u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
1047 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1048 
1049 	if (!prop->cpucp_info.eq_health_check_supported)
1050 		return;
1051 
1052 	if (hdev->eq_heartbeat_received)
1053 		hdev->eq_heartbeat_received = false;
1054 	else
1055 		hl_device_cond_reset(hdev, HL_DRV_RESET_HARD, event_mask);
1056 }
1057 
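/*
 * hl_device_heartbeat - periodic heartbeat work
 *
 * Checks the EQ heartbeat indication and sends a heartbeat packet to the
 * device CPU. On failure, a FW error is reported and a conditional hard reset
 * is requested; otherwise the work is rescheduled.
 */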
1058 static void hl_device_heartbeat(struct work_struct *work)
1059 {
1060 	struct hl_device *hdev = container_of(work, struct hl_device,
1061 						work_heartbeat.work);
1062 	struct hl_info_fw_err_info info = {0};
1063 	u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
1064 
1065 	/* Start heartbeat checks only after driver has enabled events from FW */
1066 	if (!hl_device_operational(hdev, NULL) || !hdev->init_done)
1067 		goto reschedule;
1068 
1069 	/*
1070 	 * For the EQ health check, we need to check whether the driver received the
1071 	 * heartbeat EQ event, in order to validate that the EQ is working.
1072 	 */
1073 	hl_device_eq_heartbeat(hdev);
1074 
1075 	if (!hdev->asic_funcs->send_heartbeat(hdev))
1076 		goto reschedule;
1077 
1078 	if (hl_device_operational(hdev, NULL))
1079 		dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",
1080 			is_pci_link_healthy(hdev) ? "healthy" : "broken");
1081 
1082 	info.err_type = HL_INFO_FW_HEARTBEAT_ERR;
1083 	info.event_mask = &event_mask;
1084 	hl_handle_fw_err(hdev, &info);
1085 	hl_device_cond_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT, event_mask);
1086 
1087 	return;
1088 
1089 reschedule:
1090 	/*
1091 	 * prev_reset_trigger tracks consecutive fatal h/w errors until first
1092 	 * heartbeat immediately post reset.
1093 	 * If control reached here, then at least one heartbeat work has been
1094 	 * scheduled since last reset/init cycle.
1095 	 * So if the device is not already in reset cycle, reset the flag
1096 	 * prev_reset_trigger as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR
1097 	 * status for at least one heartbeat. From this point driver restarts
1098 	 * tracking future consecutive fatal errors.
1099 	 */
1100 	if (!hdev->reset_info.in_reset)
1101 		hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
1102 
1103 	schedule_delayed_work(&hdev->work_heartbeat,
1104 			usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
1105 }
1106 
1107 /*
1108  * device_late_init - do late stuff initialization for the habanalabs device
1109  *
1110  * @hdev: pointer to habanalabs device structure
1111  *
1112  * Do stuff that either needs the device H/W queues to be active or needs
1113  * to happen after all the rest of the initialization is finished
1114  */
1115 static int device_late_init(struct hl_device *hdev)
1116 {
1117 	int rc;
1118 
1119 	if (hdev->asic_funcs->late_init) {
1120 		rc = hdev->asic_funcs->late_init(hdev);
1121 		if (rc) {
1122 			dev_err(hdev->dev,
1123 				"failed late initialization for the H/W\n");
1124 			return rc;
1125 		}
1126 	}
1127 
1128 	hdev->high_pll = hdev->asic_prop.high_pll;
1129 
1130 	if (hdev->heartbeat) {
1131 		/*
1132 		 * Before scheduling the heartbeat, the driver will check whether an EQ event
1133 		 * has been received. For the first schedule we need to set the indication to
1134 		 * true; for the next ones, it will be true only if an EQ event was sent by the FW.
1135 		 */
1136 		hdev->eq_heartbeat_received = true;
1137 
1138 		INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
1139 
1140 		schedule_delayed_work(&hdev->work_heartbeat,
1141 				usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
1142 	}
1143 
1144 	hdev->late_init_done = true;
1145 
1146 	return 0;
1147 }
1148 
1149 /*
1150  * device_late_fini - finalize all that was done in device_late_init
1151  *
1152  * @hdev: pointer to habanalabs device structure
1153  *
1154  */
1155 static void device_late_fini(struct hl_device *hdev)
1156 {
1157 	if (!hdev->late_init_done)
1158 		return;
1159 
1160 	if (hdev->heartbeat)
1161 		cancel_delayed_work_sync(&hdev->work_heartbeat);
1162 
1163 	if (hdev->asic_funcs->late_fini)
1164 		hdev->asic_funcs->late_fini(hdev);
1165 
1166 	hdev->late_init_done = false;
1167 }
1168 
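/*
 * hl_device_utilization - compute the device utilization in percent
 *
 * The utilization is the current power consumption relative to the range
 * between the default DC power and the maximum power.
 */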
1169 int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
1170 {
1171 	u64 max_power, curr_power, dc_power, dividend, divisor;
1172 	int rc;
1173 
1174 	max_power = hdev->max_power;
1175 	dc_power = hdev->asic_prop.dc_power_default;
1176 	divisor = max_power - dc_power;
1177 	if (!divisor) {
1178 		dev_warn(hdev->dev, "device utilization is not supported\n");
1179 		return -EOPNOTSUPP;
1180 	}
1181 	rc = hl_fw_cpucp_power_get(hdev, &curr_power);
1182 
1183 	if (rc)
1184 		return rc;
1185 
1186 	curr_power = clamp(curr_power, dc_power, max_power);
1187 
1188 	dividend = (curr_power - dc_power) * 100;
1189 	*utilization = (u32) div_u64(dividend, divisor);
1190 
1191 	return 0;
1192 }
1193 
1194 int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)
1195 {
1196 	int rc = 0;
1197 
1198 	mutex_lock(&hdev->debug_lock);
1199 
1200 	if (!enable) {
1201 		if (!hdev->in_debug) {
1202 			dev_err(hdev->dev,
1203 				"Failed to disable debug mode because device was not in debug mode\n");
1204 			rc = -EFAULT;
1205 			goto out;
1206 		}
1207 
1208 		if (!hdev->reset_info.hard_reset_pending)
1209 			hdev->asic_funcs->halt_coresight(hdev, ctx);
1210 
1211 		hdev->in_debug = 0;
1212 
1213 		goto out;
1214 	}
1215 
1216 	if (hdev->in_debug) {
1217 		dev_err(hdev->dev,
1218 			"Failed to enable debug mode because device is already in debug mode\n");
1219 		rc = -EFAULT;
1220 		goto out;
1221 	}
1222 
1223 	hdev->in_debug = 1;
1224 
1225 out:
1226 	mutex_unlock(&hdev->debug_lock);
1227 
1228 	return rc;
1229 }
1230 
1231 static void take_release_locks(struct hl_device *hdev)
1232 {
1233 	/* Flush anyone that is inside the critical section of enqueuing
1234 	 * jobs to the H/W
1235 	 */
1236 	hdev->asic_funcs->hw_queues_lock(hdev);
1237 	hdev->asic_funcs->hw_queues_unlock(hdev);
1238 
1239 	/* Flush processes that are sending message to CPU */
1240 	mutex_lock(&hdev->send_cpu_message_lock);
1241 	mutex_unlock(&hdev->send_cpu_message_lock);
1242 
1243 	/* Flush anyone that is inside device open */
1244 	mutex_lock(&hdev->fpriv_list_lock);
1245 	mutex_unlock(&hdev->fpriv_list_lock);
1246 	mutex_lock(&hdev->fpriv_ctrl_list_lock);
1247 	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
1248 }
1249 
1250 static void hl_abort_waiting_for_completions(struct hl_device *hdev)
1251 {
1252 	hl_abort_waiting_for_cs_completions(hdev);
1253 
1254 	/* Release all pending user interrupts, each pending user interrupt
1255 	 * holds a reference to a user context.
1256 	 */
1257 	hl_release_pending_user_interrupts(hdev);
1258 }
1259 
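/*
 * cleanup_resources - release in-flight resources as part of reset/suspend
 *
 * Halts the engines, rolls back all command submissions, flushes the MMU
 * prefetch workqueue and aborts all pending waits. On hard reset, late init
 * is undone first.
 */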
1260 static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
1261 				bool skip_wq_flush)
1262 {
1263 	if (hard_reset)
1264 		device_late_fini(hdev);
1265 
1266 	/*
1267 	 * Halt the engines and disable interrupts so we won't get any more
1268 	 * completions from H/W and we won't have any accesses from the
1269 	 * H/W to the host machine
1270 	 */
1271 	hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);
1272 
1273 	/* Go over all the queues, release all CS and their jobs */
1274 	hl_cs_rollback_all(hdev, skip_wq_flush);
1275 
1276 	/* flush the MMU prefetch workqueue */
1277 	flush_workqueue(hdev->prefetch_wq);
1278 
1279 	hl_abort_waiting_for_completions(hdev);
1280 }
1281 
1282 /*
1283  * hl_device_suspend - initiate device suspend
1284  *
1285  * @hdev: pointer to habanalabs device structure
1286  *
1287  * Puts the hw in the suspend state (all asics).
1288  * Returns 0 for success or an error on failure.
1289  * Called at driver suspend.
1290  */
1291 int hl_device_suspend(struct hl_device *hdev)
1292 {
1293 	int rc;
1294 
1295 	pci_save_state(hdev->pdev);
1296 
1297 	/* Block future CS/VM/JOB completion operations */
1298 	spin_lock(&hdev->reset_info.lock);
1299 	if (hdev->reset_info.in_reset) {
1300 		spin_unlock(&hdev->reset_info.lock);
1301 		dev_err(hdev->dev, "Can't suspend while in reset\n");
1302 		return -EIO;
1303 	}
1304 	hdev->reset_info.in_reset = 1;
1305 	spin_unlock(&hdev->reset_info.lock);
1306 
1307 	/* This blocks all other stuff that is not blocked by in_reset */
1308 	hdev->disabled = true;
1309 
1310 	take_release_locks(hdev);
1311 
1312 	rc = hdev->asic_funcs->suspend(hdev);
1313 	if (rc)
1314 		dev_err(hdev->dev,
1315 			"Failed to disable PCI access of device CPU\n");
1316 
1317 	/* Shut down the device */
1318 	pci_disable_device(hdev->pdev);
1319 	pci_set_power_state(hdev->pdev, PCI_D3hot);
1320 
1321 	return 0;
1322 }
1323 
1324 /*
1325  * hl_device_resume - initiate device resume
1326  *
1327  * @hdev: pointer to habanalabs device structure
1328  *
1329  * Bring the hw back to operating state (all asics).
1330  * Returns 0 for success or an error on failure.
1331  * Called at driver resume.
1332  */
1333 int hl_device_resume(struct hl_device *hdev)
1334 {
1335 	int rc;
1336 
1337 	pci_set_power_state(hdev->pdev, PCI_D0);
1338 	pci_restore_state(hdev->pdev);
1339 	rc = pci_enable_device_mem(hdev->pdev);
1340 	if (rc) {
1341 		dev_err(hdev->dev,
1342 			"Failed to enable PCI device in resume\n");
1343 		return rc;
1344 	}
1345 
1346 	pci_set_master(hdev->pdev);
1347 
1348 	rc = hdev->asic_funcs->resume(hdev);
1349 	if (rc) {
1350 		dev_err(hdev->dev, "Failed to resume device after suspend\n");
1351 		goto disable_device;
1352 	}
1353 
1354 
1355 	/* 'in_reset' was set to true during suspend, now we must clear it in order
1356 	 * for hard reset to be performed
1357 	 */
1358 	spin_lock(&hdev->reset_info.lock);
1359 	hdev->reset_info.in_reset = 0;
1360 	spin_unlock(&hdev->reset_info.lock);
1361 
1362 	rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
1363 	if (rc) {
1364 		dev_err(hdev->dev, "Failed to reset device during resume\n");
1365 		goto disable_device;
1366 	}
1367 
1368 	return 0;
1369 
1370 disable_device:
1371 	pci_disable_device(hdev->pdev);
1372 
1373 	return rc;
1374 }
1375 
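/*
 * device_kill_open_processes - kill all user processes that hold the device open
 *
 * Sends SIGKILL to every open user process and then waits for the open-file
 * list to become empty. Returns 0 if all processes exited, -ETIME if the
 * maximum number of trials was reached, or -EBUSY to request a retry.
 */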
1376 static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
1377 {
1378 	struct task_struct *task = NULL;
1379 	struct list_head *hpriv_list;
1380 	struct hl_fpriv *hpriv;
1381 	struct mutex *hpriv_lock;
1382 	u32 pending_cnt;
1383 
1384 	hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
1385 	hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
1386 
1387 	/* Give time for the user to close the FD, and for processes that are inside
1388 	 * hl_device_open to finish
1389 	 */
1390 	if (!list_empty(hpriv_list))
1391 		ssleep(1);
1392 
1393 	if (timeout) {
1394 		pending_cnt = timeout;
1395 	} else {
1396 		if (hdev->process_kill_trial_cnt) {
1397 			/* Processes have been already killed */
1398 			pending_cnt = 1;
1399 			goto wait_for_processes;
1400 		} else {
1401 			/* Wait a small period after process kill */
1402 			pending_cnt = HL_PENDING_RESET_PER_SEC;
1403 		}
1404 	}
1405 
1406 	mutex_lock(hpriv_lock);
1407 
1408 	/* This section must be protected because we are dereferencing
1409 	 * pointers that are freed if the process exits
1410 	 */
1411 	list_for_each_entry(hpriv, hpriv_list, dev_node) {
1412 		task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
1413 		if (task) {
1414 			dev_info(hdev->dev, "Killing user process pid=%d\n",
1415 				task_pid_nr(task));
1416 			send_sig(SIGKILL, task, 1);
1417 			usleep_range(1000, 10000);
1418 
1419 			put_task_struct(task);
1420 		} else {
1421 			dev_dbg(hdev->dev,
1422 				"Can't get task struct for user process %d, process was killed from outside the driver\n",
1423 				pid_nr(hpriv->taskpid));
1424 		}
1425 	}
1426 
1427 	mutex_unlock(hpriv_lock);
1428 
1429 	/*
1430 	 * We killed the open users, but that doesn't mean they are closed.
1431 	 * It could be that they are running a long cleanup phase in the driver
1432 	 * e.g. MMU unmappings, or running another long teardown flow even before
1433 	 * our cleanup.
1434 	 * Therefore we need to wait again to make sure they are closed before
1435 	 * continuing with the reset.
1436 	 */
1437 
1438 wait_for_processes:
1439 	while ((!list_empty(hpriv_list)) && (pending_cnt)) {
1440 		dev_dbg(hdev->dev,
1441 			"Waiting for all unmap operations to finish before hard reset\n");
1442 
1443 		pending_cnt--;
1444 
1445 		ssleep(1);
1446 	}
1447 
1448 	/* All processes exited successfully */
1449 	if (list_empty(hpriv_list))
1450 		return 0;
1451 
1452 	/* Give up waiting for processes to exit */
1453 	if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
1454 		return -ETIME;
1455 
1456 	hdev->process_kill_trial_cnt++;
1457 
1458 	return -EBUSY;
1459 }
1460 
1461 static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
1462 {
1463 	struct list_head *hpriv_list;
1464 	struct hl_fpriv *hpriv;
1465 	struct mutex *hpriv_lock;
1466 
1467 	hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
1468 	hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
1469 
1470 	mutex_lock(hpriv_lock);
1471 	list_for_each_entry(hpriv, hpriv_list, dev_node)
1472 		hpriv->hdev = NULL;
1473 	mutex_unlock(hpriv_lock);
1474 }
1475 
1476 static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
1477 {
1478 	/* If the reset is due to heartbeat, the device CPU is not responsive,
1479 	 * in which case there is no point in sending a PCI disable message to it.
1480 	 */
1481 	if ((flags & HL_DRV_RESET_HARD) &&
1482 			!(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
1483 		/* Disable PCI access from the device F/W so it won't send
1484 		 * us additional interrupts. We disable MSI/MSI-X at
1485 		 * the halt_engines function and we can't have the F/W
1486 		 * sending us interrupts after that. We need to disable
1487 		 * the access here because if the device is marked as
1488 		 * disabled, the message won't be sent. Also, in case
1489 		 * of heartbeat, the device CPU is marked as disabled,
1490 		 * so this message won't be sent.
1491 		 */
1492 		if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0)) {
1493 			dev_warn(hdev->dev, "Failed to disable FW's PCI access\n");
1494 			return;
1495 		}
1496 
1497 		/* verify that last EQs are handled before disabled is set */
1498 		if (hdev->cpu_queues_enable)
1499 			synchronize_irq(pci_irq_vector(hdev->pdev,
1500 					hdev->asic_prop.eq_interrupt_id));
1501 	}
1502 }
1503 
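/*
 * handle_reset_trigger - record the reset cause and detect repeated triggers
 *
 * Updates the current reset cause according to the reset flags and marks
 * reset_trigger_repeated if the same trigger fired twice in a row. Skipped
 * when a compute context is active.
 */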
1504 static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
1505 {
1506 	u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
1507 
1508 	/* No consecutive mechanism when user context exists */
1509 	if (hdev->is_compute_ctx_active)
1510 		return;
1511 
1512 	/*
1513 	 * 'reset cause' is being updated here, because getting here
1514 	 * means that it's both the first and the last time we're here for this
1515 	 * reset cycle ('in_reset' makes sure of it). This makes sure that
1516 	 * 'reset_cause' will continue holding its first recorded reason!
1517 	 */
1518 	if (flags & HL_DRV_RESET_HEARTBEAT) {
1519 		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
1520 		cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
1521 	} else if (flags & HL_DRV_RESET_TDR) {
1522 		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
1523 		cur_reset_trigger = HL_DRV_RESET_TDR;
1524 	} else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
1525 		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
1526 		cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
1527 	} else {
1528 		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
1529 	}
1530 
1531 	/*
1532 	 * If the reset cause is the same twice in a row, then reset_trigger_repeated
1533 	 * is set, and if this reset is due to a fatal FW error, the
1534 	 * device is set to an unstable state.
1535 	 */
1536 	if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
1537 		hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
1538 		hdev->reset_info.reset_trigger_repeated = 0;
1539 	} else {
1540 		hdev->reset_info.reset_trigger_repeated = 1;
1541 	}
1542 }
1543 
1544 /*
1545  * hl_device_reset - reset the device
1546  *
1547  * @hdev: pointer to habanalabs device structure
1548  * @flags: reset flags.
1549  *
1550  * Block future CS and wait for pending CS to be enqueued
1551  * Call ASIC H/W fini
1552  * Flush all completions
1553  * Re-initialize all internal data structures
1554  * Call ASIC H/W init, late_init
1555  * Test queues
1556  * Enable device
1557  *
1558  * Returns 0 for success or an error on failure.
1559  */
1560 int hl_device_reset(struct hl_device *hdev, u32 flags)
1561 {
1562 	bool hard_reset, from_hard_reset_thread, fw_reset, reset_upon_device_release,
1563 		schedule_hard_reset = false, delay_reset, from_dev_release, from_watchdog_thread;
1564 	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
1565 	struct hl_ctx *ctx;
1566 	int i, rc, hw_fini_rc;
1567 
1568 	if (!hdev->init_done) {
1569 		dev_err(hdev->dev, "Can't reset before initialization is done\n");
1570 		return 0;
1571 	}
1572 
1573 	hard_reset = !!(flags & HL_DRV_RESET_HARD);
1574 	from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
1575 	fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
1576 	from_dev_release = !!(flags & HL_DRV_RESET_DEV_RELEASE);
1577 	delay_reset = !!(flags & HL_DRV_RESET_DELAY);
1578 	from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
1579 	reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
1580 
1581 	if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
1582 		dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
1583 		return 0;
1584 	}
1585 
1586 	if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
1587 		dev_dbg(hdev->dev, "asic doesn't support compute reset - do hard-reset instead\n");
1588 		hard_reset = true;
1589 	}
1590 
1591 	if (reset_upon_device_release) {
1592 		if (hard_reset) {
1593 			dev_crit(hdev->dev,
1594 				"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
1595 			return -EINVAL;
1596 		}
1597 
1598 		goto do_reset;
1599 	}
1600 
1601 	if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
1602 		dev_dbg(hdev->dev,
1603 			"asic doesn't allow inference soft reset - do hard-reset instead\n");
1604 		hard_reset = true;
1605 	}
1606 
1607 do_reset:
1608 	/* Re-entry of reset thread */
1609 	if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
1610 		goto kill_processes;
1611 
1612 	/*
1613 	 * Prevent concurrency in this function - only one reset should be
1614 	 * done at any given time. We need to perform this only if we didn't
1615 	 * get here from a dedicated hard reset thread.
1616 	 */
1617 	if (!from_hard_reset_thread) {
1618 		/* Block future CS/VM/JOB completion operations */
1619 		spin_lock(&hdev->reset_info.lock);
1620 		if (hdev->reset_info.in_reset) {
1621 			/* We allow scheduling of a hard reset only during a compute reset */
1622 			if (hard_reset && hdev->reset_info.in_compute_reset)
1623 				hdev->reset_info.hard_reset_schedule_flags = flags;
1624 			spin_unlock(&hdev->reset_info.lock);
1625 			return 0;
1626 		}
1627 
1628 		/* This still allows the completion of some KDMA ops
1629 		 * Update this before in_reset because in_compute_reset implies we are in reset
1630 		 */
1631 		hdev->reset_info.in_compute_reset = !hard_reset;
1632 
1633 		hdev->reset_info.in_reset = 1;
1634 
1635 		spin_unlock(&hdev->reset_info.lock);
1636 
1637 		/* Cancel the device release watchdog work if required.
1638 		 * In case of reset-upon-device-release while the release watchdog work is
1639 		 * scheduled due to a hard-reset, do hard-reset instead of compute-reset.
1640 		 */
1641 		if ((hard_reset || from_dev_release) && hdev->reset_info.watchdog_active) {
1642 			struct hl_device_reset_work *watchdog_work =
1643 					&hdev->device_release_watchdog_work;
1644 
1645 			hdev->reset_info.watchdog_active = 0;
1646 			if (!from_watchdog_thread)
1647 				cancel_delayed_work_sync(&watchdog_work->reset_work);
1648 
1649 			if (from_dev_release && (watchdog_work->flags & HL_DRV_RESET_HARD)) {
1650 				hdev->reset_info.in_compute_reset = 0;
1651 				flags |= HL_DRV_RESET_HARD;
1652 				flags &= ~HL_DRV_RESET_DEV_RELEASE;
1653 				hard_reset = true;
1654 			}
1655 		}
1656 
1657 		if (delay_reset)
1658 			usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);
1659 
1660 escalate_reset_flow:
1661 		handle_reset_trigger(hdev, flags);
1662 		send_disable_pci_access(hdev, flags);
1663 
1664 		/* This also blocks future CS/VM/JOB completion operations */
1665 		hdev->disabled = true;
1666 
1667 		take_release_locks(hdev);
1668 
1669 		if (hard_reset)
1670 			dev_info(hdev->dev, "Going to reset device\n");
1671 		else if (reset_upon_device_release)
1672 			dev_dbg(hdev->dev, "Going to reset device after release by user\n");
1673 		else
1674 			dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
1675 	}
1676 
1677 	if ((hard_reset) && (!from_hard_reset_thread)) {
1678 		hdev->reset_info.hard_reset_pending = true;
1679 
1680 		hdev->process_kill_trial_cnt = 0;
1681 
1682 		hdev->device_reset_work.flags = flags;
1683 
1684 		/*
1685 		 * Because the reset function can't run from heartbeat work,
1686 		 * we need to call the reset function from a dedicated work.
1687 		 */
1688 		queue_delayed_work(hdev->reset_wq, &hdev->device_reset_work.reset_work, 0);
1689 
1690 		return 0;
1691 	}
1692 
1693 	cleanup_resources(hdev, hard_reset, fw_reset, from_dev_release);
1694 
1695 kill_processes:
1696 	if (hard_reset) {
1697 		/* Kill processes here after CS rollback. This is because the
1698 		 * process can't really exit until all its CSs are done, which
1699 		 * is what we do in cs rollback
1700 		 */
1701 		rc = device_kill_open_processes(hdev, 0, false);
1702 
1703 		if (rc == -EBUSY) {
1704 			if (hdev->device_fini_pending) {
1705 				dev_crit(hdev->dev,
1706 					"%s Failed to kill all open processes, stopping hard reset\n",
1707 					dev_name(&(hdev)->pdev->dev));
1708 				goto out_err;
1709 			}
1710 
1711 			/* signal reset thread to reschedule */
1712 			return rc;
1713 		}
1714 
1715 		if (rc) {
1716 			dev_crit(hdev->dev,
1717 				"%s Failed to kill all open processes, stopping hard reset\n",
1718 				dev_name(&(hdev)->pdev->dev));
1719 			goto out_err;
1720 		}
1721 
1722 		/* Flush the Event queue workers to make sure no other thread is
1723 		 * reading or writing to registers during the reset
1724 		 */
1725 		flush_workqueue(hdev->eq_wq);
1726 	}
1727 
1728 	/* Reset the H/W. It will be in idle state after this returns */
1729 	hw_fini_rc = hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
1730 
1731 	if (hard_reset) {
1732 		hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
1733 
1734 		/* Release kernel context */
1735 		if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
1736 			hdev->kernel_ctx = NULL;
1737 
1738 		hl_vm_fini(hdev);
1739 		hl_mmu_fini(hdev);
1740 		hl_eq_reset(hdev, &hdev->event_queue);
1741 	}
1742 
1743 	/* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
1744 	hl_hw_queue_reset(hdev, hard_reset);
1745 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1746 		hl_cq_reset(hdev, &hdev->completion_queue[i]);
1747 
1748 	/* Make sure the context switch phase will run again */
1749 	ctx = hl_get_compute_ctx(hdev);
1750 	if (ctx) {
1751 		atomic_set(&ctx->thread_ctx_switch_token, 1);
1752 		ctx->thread_ctx_switch_wait_token = 0;
1753 		hl_ctx_put(ctx);
1754 	}
1755 
1756 	if (hw_fini_rc) {
1757 		rc = hw_fini_rc;
1758 		goto out_err;
1759 	}
1760 	/* Finished tear-down, starting to re-initialize */
1761 
1762 	if (hard_reset) {
1763 		hdev->device_cpu_disabled = false;
1764 		hdev->reset_info.hard_reset_pending = false;
1765 
1766 		if (hdev->reset_info.reset_trigger_repeated &&
1767 				(hdev->reset_info.prev_reset_trigger ==
1768 						HL_DRV_RESET_FW_FATAL_ERR)) {
1769 			/* If there are two back-to-back resets from the FW,
1770 			 * ensure the driver puts the device in an unusable state
1771 			 */
1772 			dev_crit(hdev->dev,
1773 				"%s Consecutive FW fatal errors received, stopping hard reset\n",
1774 				dev_name(&(hdev)->pdev->dev));
1775 			rc = -EIO;
1776 			goto out_err;
1777 		}
1778 
1779 		if (hdev->kernel_ctx) {
1780 			dev_crit(hdev->dev,
1781 				"%s kernel ctx was alive during hard reset, something is terribly wrong\n",
1782 				dev_name(&(hdev)->pdev->dev));
1783 			rc = -EBUSY;
1784 			goto out_err;
1785 		}
1786 
1787 		rc = hl_mmu_init(hdev);
1788 		if (rc) {
1789 			dev_err(hdev->dev,
1790 				"Failed to initialize MMU S/W after hard reset\n");
1791 			goto out_err;
1792 		}
1793 
1794 		/* Allocate the kernel context */
1795 		hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
1796 						GFP_KERNEL);
1797 		if (!hdev->kernel_ctx) {
1798 			rc = -ENOMEM;
1799 			hl_mmu_fini(hdev);
1800 			goto out_err;
1801 		}
1802 
1803 		hdev->is_compute_ctx_active = false;
1804 
1805 		rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
1806 		if (rc) {
1807 			dev_err(hdev->dev,
1808 				"failed to init kernel ctx in hard reset\n");
1809 			kfree(hdev->kernel_ctx);
1810 			hdev->kernel_ctx = NULL;
1811 			hl_mmu_fini(hdev);
1812 			goto out_err;
1813 		}
1814 	}
1815 
1816 	/* The device is enabled at this point because part of the
1817 	 * initialization requires communication with the device firmware
1818 	 * to get information that is needed for the initialization itself
1819 	 */
1820 	hdev->disabled = false;
1821 
1822 	/* F/W security enabled indication might be updated after hard-reset */
1823 	if (hard_reset) {
1824 		rc = hl_fw_read_preboot_status(hdev);
1825 		if (rc)
1826 			goto out_err;
1827 	}
1828 
1829 	rc = hdev->asic_funcs->hw_init(hdev);
1830 	if (rc) {
1831 		dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
1832 		goto out_err;
1833 	}
1834 
1835 	/* If the device is not idle, fail the reset process */
1836 	if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
1837 						HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
1838 		print_idle_status_mask(hdev, "device is not idle after reset", idle_mask);
1839 		rc = -EIO;
1840 		goto out_err;
1841 	}
1842 
1843 	/* Check that the communication with the device is working */
1844 	rc = hdev->asic_funcs->test_queues(hdev);
1845 	if (rc) {
1846 		dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");
1847 		goto out_err;
1848 	}
1849 
1850 	if (hard_reset) {
1851 		rc = device_late_init(hdev);
1852 		if (rc) {
1853 			dev_err(hdev->dev, "Failed late init after hard reset\n");
1854 			goto out_err;
1855 		}
1856 
1857 		rc = hl_vm_init(hdev);
1858 		if (rc) {
1859 			dev_err(hdev->dev, "Failed to init memory module after hard reset\n");
1860 			goto out_err;
1861 		}
1862 
1863 		if (!hdev->asic_prop.fw_security_enabled)
1864 			hl_fw_set_max_power(hdev);
1865 	} else {
1866 		rc = hdev->asic_funcs->compute_reset_late_init(hdev);
1867 		if (rc) {
1868 			if (reset_upon_device_release)
1869 				dev_err(hdev->dev,
1870 					"Failed late init in reset after device release\n");
1871 			else
1872 				dev_err(hdev->dev, "Failed late init after compute reset\n");
1873 			goto out_err;
1874 		}
1875 	}
1876 
1877 	rc = hdev->asic_funcs->scrub_device_mem(hdev);
1878 	if (rc) {
1879 		dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);
1880 		goto out_err;
1881 	}
1882 
1883 	spin_lock(&hdev->reset_info.lock);
1884 	hdev->reset_info.in_compute_reset = 0;
1885 
1886 	/* Schedule a hard reset only if one was requested and we are not already
1887 	 * in a hard reset. We keep 'in_reset' set, so no other reset can start
1888 	 * while the hard reset is scheduled.
1889 	 */
1890 	if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)
1891 		schedule_hard_reset = true;
1892 	else
1893 		hdev->reset_info.in_reset = 0;
1894 
1895 	spin_unlock(&hdev->reset_info.lock);
1896 
1897 	hdev->reset_info.needs_reset = false;
1898 
1899 	if (hard_reset)
1900 		dev_info(hdev->dev,
1901 			 "Successfully finished resetting the %s device\n",
1902 			 dev_name(&(hdev)->pdev->dev));
1903 	else
1904 		dev_dbg(hdev->dev,
1905 			"Successfully finished resetting the %s device\n",
1906 			dev_name(&(hdev)->pdev->dev));
1907 
1908 	if (hard_reset) {
1909 		hdev->reset_info.hard_reset_cnt++;
1910 
1911 		/* After the reset is done, we are ready to receive events from
1912 		 * the F/W. We can't enable them earlier because events would be
1913 		 * ignored, and if a fatal event were missed, the device would
1914 		 * remain operational although it shouldn't be.
1915 		 */
1916 		hdev->asic_funcs->enable_events_from_fw(hdev);
1917 	} else {
1918 		if (!reset_upon_device_release)
1919 			hdev->reset_info.compute_reset_cnt++;
1920 
1921 		if (schedule_hard_reset) {
1922 			dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
1923 			flags = hdev->reset_info.hard_reset_schedule_flags;
1924 			hdev->reset_info.hard_reset_schedule_flags = 0;
1925 			hard_reset = true;
1926 			goto escalate_reset_flow;
1927 		}
1928 	}
1929 
1930 	return 0;
1931 
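/* Error path: mark the device as disabled. A failed compute reset is
 * escalated to a hard reset, while a failed hard reset leaves the device
 * unusable.
 */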
1932 out_err:
1933 	hdev->disabled = true;
1934 
1935 	spin_lock(&hdev->reset_info.lock);
1936 	hdev->reset_info.in_compute_reset = 0;
1937 
1938 	if (hard_reset) {
1939 		dev_err(hdev->dev,
1940 			"%s Failed to reset! Device is NOT usable\n",
1941 			dev_name(&(hdev)->pdev->dev));
1942 		hdev->reset_info.hard_reset_cnt++;
1943 	} else {
1944 		if (reset_upon_device_release) {
1945 			dev_err(hdev->dev, "Failed to reset device after user release\n");
1946 			flags &= ~HL_DRV_RESET_DEV_RELEASE;
1947 		} else {
1948 			dev_err(hdev->dev, "Failed to do compute reset\n");
1949 			hdev->reset_info.compute_reset_cnt++;
1950 		}
1951 
1952 		spin_unlock(&hdev->reset_info.lock);
1953 		flags |= HL_DRV_RESET_HARD;
1954 		hard_reset = true;
1955 		goto escalate_reset_flow;
1956 	}
1957 
1958 	hdev->reset_info.in_reset = 0;
1959 
1960 	spin_unlock(&hdev->reset_info.lock);
1961 
1962 	return rc;
1963 }
1964 
1965 /*
1966  * hl_device_cond_reset() - conditionally reset the device.
1967  * @hdev: pointer to habanalabs device structure.
1968  * @flags: reset flags.
1969  * @event_mask: events to notify user about.
1970  *
1971  * Conditionally reset the device, or alternatively schedule a watchdog work to reset the device
1972  * unless another reset precedes it.
1973  */
1974 int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask)
1975 {
1976 	struct hl_ctx *ctx = NULL;
1977 
1978 	/* F/W reset cannot be postponed */
1979 	if (flags & HL_DRV_RESET_BYPASS_REQ_TO_FW)
1980 		goto device_reset;
1981 
1982 	/* Device release watchdog is relevant only if user exists and gets a reset notification */
1983 	if (!(event_mask & HL_NOTIFIER_EVENT_DEVICE_RESET)) {
1984 		dev_err(hdev->dev, "Resetting device without a reset indication to user\n");
1985 		goto device_reset;
1986 	}
1987 
1988 	ctx = hl_get_compute_ctx(hdev);
1989 	if (!ctx)
1990 		goto device_reset;
1991 
1992 	/*
1993 	 * There is no point in postponing the reset if user is not registered for events.
1994 	 * However if no eventfd_ctx exists but the device release watchdog is already scheduled, it
1995 	 * just implies that user has unregistered as part of handling a previous event. In this
1996 	 * case an immediate reset is not required.
1997 	 */
1998 	if (!ctx->hpriv->notifier_event.eventfd && !hdev->reset_info.watchdog_active)
1999 		goto device_reset;
2000 
2001 	/* Schedule the device release watchdog work unless reset is already in progress or if the
2002 	 * work is already scheduled.
2003 	 */
2004 	spin_lock(&hdev->reset_info.lock);
2005 	if (hdev->reset_info.in_reset) {
2006 		spin_unlock(&hdev->reset_info.lock);
2007 		goto device_reset;
2008 	}
2009 
2010 	if (hdev->reset_info.watchdog_active) {
2011 		hdev->device_release_watchdog_work.flags |= flags;
2012 		goto out;
2013 	}
2014 
2015 	hdev->device_release_watchdog_work.flags = flags;
2016 	dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n",
2017 		hdev->device_release_watchdog_timeout_sec);
2018 	schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work,
2019 				msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000));
2020 	hdev->reset_info.watchdog_active = 1;
2021 out:
2022 	spin_unlock(&hdev->reset_info.lock);
2023 
2024 	hl_notifier_event_send_all(hdev, event_mask);
2025 
2026 	hl_ctx_put(ctx);
2027 
2028 	hl_abort_waiting_for_completions(hdev);
2029 
2030 	return 0;
2031 
2032 device_reset:
2033 	if (event_mask)
2034 		hl_notifier_event_send_all(hdev, event_mask);
2035 	if (ctx)
2036 		hl_ctx_put(ctx);
2037 
2038 	return hl_device_reset(hdev, flags);
2039 }
2040 
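/*
 * hl_notifier_event_send - record the given events for a single user process
 * and signal its eventfd, if one is registered, so it can read the pending
 * events mask.
 */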
2041 static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)
2042 {
2043 	mutex_lock(&notifier_event->lock);
2044 	notifier_event->events_mask |= event_mask;
2045 
2046 	if (notifier_event->eventfd)
2047 		eventfd_signal(notifier_event->eventfd, 1);
2048 
2049 	mutex_unlock(&notifier_event->lock);
2050 }
2051 
2052 /*
2053  * hl_notifier_event_send_all - notify all user processes via eventfd
2054  *
2055  * @hdev: pointer to habanalabs device structure
2056  * @event_mask: the event(s) that occurred
2058  */
2059 void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
2060 {
2061 	struct hl_fpriv	*hpriv;
2062 
2063 	if (!event_mask) {
2064 		dev_warn(hdev->dev, "Skip sending zero event\n");
2065 		return;
2066 	}
2067 
2068 	mutex_lock(&hdev->fpriv_list_lock);
2069 
2070 	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
2071 		hl_notifier_event_send(&hpriv->notifier_event, event_mask);
2072 
2073 	mutex_unlock(&hdev->fpriv_list_lock);
2074 }
2075 
2076 /*
2077  * hl_device_init - main initialization function for habanalabs device
2078  *
2079  * @hdev: pointer to habanalabs device structure
2080  *
2081  * Allocate an id for the device, do early initialization and then call the
2082  * ASIC specific initialization functions. Finally, create the cdev and the
2083  * Linux device to expose it to the user
2084  */
2085 int hl_device_init(struct hl_device *hdev)
2086 {
2087 	int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
2088 	struct hl_ts_free_jobs *free_jobs_data;
2089 	bool expose_interfaces_on_err = false;
2090 	void *p;
2091 
2092 	/* Initialize ASIC function pointers and perform early init */
2093 	rc = device_early_init(hdev);
2094 	if (rc)
2095 		goto out_disabled;
2096 
2097 	user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
2098 				hdev->asic_prop.user_interrupt_count;
2099 
2100 	if (user_interrupt_cnt) {
2101 		hdev->user_interrupt = kcalloc(user_interrupt_cnt, sizeof(*hdev->user_interrupt),
2102 						GFP_KERNEL);
2103 		if (!hdev->user_interrupt) {
2104 			rc = -ENOMEM;
2105 			goto early_fini;
2106 		}
2107 
2108 		/* Timestamp records are supported only if the device supports a CQ */
2109 		if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {
2110 			for (i = 0 ; i < user_interrupt_cnt ; i++) {
2111 				p = vzalloc(TIMESTAMP_FREE_NODES_NUM *
2112 						sizeof(struct timestamp_reg_free_node));
2113 				if (!p) {
2114 					rc = -ENOMEM;
2115 					goto free_usr_intr_mem;
2116 				}
2117 				free_jobs_data = &hdev->user_interrupt[i].ts_free_jobs_data;
2118 				free_jobs_data->free_nodes_pool = p;
2119 				free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;
2120 				free_jobs_data->next_avail_free_node_idx = 0;
2121 			}
2122 		}
2123 	}
2124 
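	/* Allocate the timestamp free-nodes pool for the common user CQ
	 * interrupt as well
	 */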
2125 	free_jobs_data = &hdev->common_user_cq_interrupt.ts_free_jobs_data;
2126 	p = vzalloc(TIMESTAMP_FREE_NODES_NUM *
2127 				sizeof(struct timestamp_reg_free_node));
2128 	if (!p) {
2129 		rc = -ENOMEM;
2130 		goto free_usr_intr_mem;
2131 	}
2132 
2133 	free_jobs_data->free_nodes_pool = p;
2134 	free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM;
2135 	free_jobs_data->next_avail_free_node_idx = 0;
2136 
2137 	/*
2138 	 * Start calling ASIC initialization. First S/W then H/W and finally
2139 	 * late init
2140 	 */
2141 	rc = hdev->asic_funcs->sw_init(hdev);
2142 	if (rc)
2143 		goto free_common_usr_intr_mem;
2144 
2146 	/* initialize completion structure for multi CS wait */
2147 	hl_multi_cs_completion_init(hdev);
2148 
2149 	/*
2150 	 * Initialize the H/W queues. Must be done before hw_init, because
2151 	 * there the addresses of the kernel queue are being written to the
2152 	 * registers of the device
2153 	 */
2154 	rc = hl_hw_queues_create(hdev);
2155 	if (rc) {
2156 		dev_err(hdev->dev, "failed to initialize kernel queues\n");
2157 		goto sw_fini;
2158 	}
2159 
2160 	cq_cnt = hdev->asic_prop.completion_queues_count;
2161 
2162 	/*
2163 	 * Initialize the completion queues. Must be done before hw_init,
2164 	 * because there the addresses of the completion queues are being
2165 	 * passed as arguments to request_irq
2166 	 */
2167 	if (cq_cnt) {
2168 		hdev->completion_queue = kcalloc(cq_cnt,
2169 				sizeof(*hdev->completion_queue),
2170 				GFP_KERNEL);
2171 
2172 		if (!hdev->completion_queue) {
2173 			dev_err(hdev->dev,
2174 				"failed to allocate completion queues\n");
2175 			rc = -ENOMEM;
2176 			goto hw_queues_destroy;
2177 		}
2178 	}
2179 
2180 	for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
2181 		rc = hl_cq_init(hdev, &hdev->completion_queue[i],
2182 				hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
2183 		if (rc) {
2184 			dev_err(hdev->dev,
2185 				"failed to initialize completion queue\n");
2186 			goto cq_fini;
2187 		}
2188 		hdev->completion_queue[i].cq_idx = i;
2189 	}
2190 
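	/* Allocate the shadow queue that tracks pending command submissions,
	 * one slot per possible in-flight CS
	 */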
2191 	hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,
2192 					sizeof(struct hl_cs *), GFP_KERNEL);
2193 	if (!hdev->shadow_cs_queue) {
2194 		rc = -ENOMEM;
2195 		goto cq_fini;
2196 	}
2197 
2198 	/*
2199 	 * Initialize the event queue. Must be done before hw_init,
2200 	 * because there the address of the event queue is being
2201 	 * passed as argument to request_irq
2202 	 */
2203 	rc = hl_eq_init(hdev, &hdev->event_queue);
2204 	if (rc) {
2205 		dev_err(hdev->dev, "failed to initialize event queue\n");
2206 		goto free_shadow_cs_queue;
2207 	}
2208 
2209 	/* MMU S/W must be initialized before kernel context is created */
2210 	rc = hl_mmu_init(hdev);
2211 	if (rc) {
2212 		dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
2213 		goto eq_fini;
2214 	}
2215 
2216 	/* Allocate the kernel context */
2217 	hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
2218 	if (!hdev->kernel_ctx) {
2219 		rc = -ENOMEM;
2220 		goto mmu_fini;
2221 	}
2222 
2223 	hdev->is_compute_ctx_active = false;
2224 
2225 	hdev->asic_funcs->state_dump_init(hdev);
2226 
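	/* Default timeout for the device release watchdog, used when a hard
	 * reset is postponed until the user releases the device
	 */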
2227 	hdev->device_release_watchdog_timeout_sec = HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC;
2228 
2229 	hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;
2230 
2231 	rc = hl_debugfs_device_init(hdev);
2232 	if (rc) {
2233 		dev_err(hdev->dev, "failed to initialize debugfs entry structure\n");
2234 		kfree(hdev->kernel_ctx);
2235 		goto mmu_fini;
2236 	}
2237 
2238 	/* The debugfs entry structure is accessed in hl_ctx_init(), so it must be called after
2239 	 * hl_debugfs_device_init().
2240 	 */
2241 	rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
2242 	if (rc) {
2243 		dev_err(hdev->dev, "failed to initialize kernel context\n");
2244 		kfree(hdev->kernel_ctx);
2245 		goto debugfs_device_fini;
2246 	}
2247 
2248 	rc = hl_cb_pool_init(hdev);
2249 	if (rc) {
2250 		dev_err(hdev->dev, "failed to initialize CB pool\n");
2251 		goto release_ctx;
2252 	}
2253 
2254 	rc = hl_dec_init(hdev);
2255 	if (rc) {
2256 		dev_err(hdev->dev, "Failed to initialize the decoder module\n");
2257 		goto cb_pool_fini;
2258 	}
2259 
2260 	/*
2261 	 * From this point, override rc (=0) in case of an error to allow debugging
2262 	 * (by adding char devices and creating sysfs/debugfs files as part of the error flow).
2263 	 */
2264 	expose_interfaces_on_err = true;
2265 
2266 	/* The device is enabled at this point because part of the
2267 	 * initialization requires communication with the device firmware
2268 	 * to get information that is needed for the initialization itself
2269 	 */
2270 	hdev->disabled = false;
2271 
2272 	rc = hdev->asic_funcs->hw_init(hdev);
2273 	if (rc) {
2274 		dev_err(hdev->dev, "failed to initialize the H/W\n");
2275 		rc = 0;
2276 		goto out_disabled;
2277 	}
2278 
2279 	/* Check that the communication with the device is working */
2280 	rc = hdev->asic_funcs->test_queues(hdev);
2281 	if (rc) {
2282 		dev_err(hdev->dev, "Failed to detect if device is alive\n");
2283 		rc = 0;
2284 		goto out_disabled;
2285 	}
2286 
2287 	rc = device_late_init(hdev);
2288 	if (rc) {
2289 		dev_err(hdev->dev, "Failed late initialization\n");
2290 		rc = 0;
2291 		goto out_disabled;
2292 	}
2293 
2294 	dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
2295 		hdev->asic_name,
2296 		hdev->asic_prop.dram_size / SZ_1G);
2297 
2298 	rc = hl_vm_init(hdev);
2299 	if (rc) {
2300 		dev_err(hdev->dev, "Failed to initialize memory module\n");
2301 		rc = 0;
2302 		goto out_disabled;
2303 	}
2304 
2305 	/*
2306 	 * Expose devices and sysfs/debugfs files to user.
2307 	 * From here there is no need to expose them in case of an error.
2308 	 */
2309 	expose_interfaces_on_err = false;
2310 
2311 	rc = drm_dev_register(&hdev->drm, 0);
2312 	if (rc) {
2313 		dev_err(hdev->dev, "Failed to register DRM device, rc %d\n", rc);
2314 		rc = 0;
2315 		goto out_disabled;
2316 	}
2317 
2318 	rc = cdev_sysfs_debugfs_add(hdev);
2319 	if (rc) {
2320 		dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n");
2321 		rc = 0;
2322 		goto out_disabled;
2323 	}
2324 
2325 	/* Need to call this again because the max power might change,
2326 	 * depending on card type for certain ASICs
2327 	 */
2328 	if (hdev->asic_prop.set_max_power_on_device_init &&
2329 			!hdev->asic_prop.fw_security_enabled)
2330 		hl_fw_set_max_power(hdev);
2331 
2332 	/*
2333 	 * hl_hwmon_init() must be called after device_late_init(), because only
2334 	 * then do we get the information from the device about which
2335 	 * hwmon-related sensors it supports.
2336 	 * Furthermore, it must be done after adding the device to the system.
2337 	 */
2338 	rc = hl_hwmon_init(hdev);
2339 	if (rc) {
2340 		dev_err(hdev->dev, "Failed to initialize hwmon\n");
2341 		rc = 0;
2342 		goto out_disabled;
2343 	}
2344 
2345 	dev_notice(hdev->dev,
2346 		"Successfully added device %s to habanalabs driver\n",
2347 		dev_name(&(hdev)->pdev->dev));
2348 
2349 	/* After initialization is done, we are ready to receive events from
2350 	 * the F/W. We can't enable them earlier because events would be
2351 	 * ignored, and if a fatal event were missed, the device would remain
2352 	 * operational although it shouldn't be.
2353 	 */
2354 	hdev->asic_funcs->enable_events_from_fw(hdev);
2355 
2356 	hdev->init_done = true;
2357 
2358 	return 0;
2359 
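/* Error flow: undo the initialization steps in reverse order */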
2360 cb_pool_fini:
2361 	hl_cb_pool_fini(hdev);
2362 release_ctx:
2363 	if (hl_ctx_put(hdev->kernel_ctx) != 1)
2364 		dev_err(hdev->dev,
2365 			"kernel ctx is still alive on initialization failure\n");
2366 debugfs_device_fini:
2367 	hl_debugfs_device_fini(hdev);
2368 mmu_fini:
2369 	hl_mmu_fini(hdev);
2370 eq_fini:
2371 	hl_eq_fini(hdev, &hdev->event_queue);
2372 free_shadow_cs_queue:
2373 	kfree(hdev->shadow_cs_queue);
2374 cq_fini:
2375 	for (i = 0 ; i < cq_ready_cnt ; i++)
2376 		hl_cq_fini(hdev, &hdev->completion_queue[i]);
2377 	kfree(hdev->completion_queue);
2378 hw_queues_destroy:
2379 	hl_hw_queues_destroy(hdev);
2380 sw_fini:
2381 	hdev->asic_funcs->sw_fini(hdev);
2382 free_common_usr_intr_mem:
2383 	vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);
2384 free_usr_intr_mem:
2385 	if (user_interrupt_cnt) {
2386 		for (i = 0 ; i < user_interrupt_cnt ; i++) {
2387 			if (!hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool)
2388 				break;
2389 			vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);
2390 		}
2391 		kfree(hdev->user_interrupt);
2392 	}
2393 early_fini:
2394 	device_early_fini(hdev);
2395 out_disabled:
2396 	hdev->disabled = true;
2397 	if (expose_interfaces_on_err) {
2398 		drm_dev_register(&hdev->drm, 0);
2399 		cdev_sysfs_debugfs_add(hdev);
2400 	}
2401 
2402 	pr_err("Failed to initialize accel%d. Device %s is NOT usable!\n",
2403 		hdev->cdev_idx, dev_name(&hdev->pdev->dev));
2404 
2405 	return rc;
2406 }
2407 
2408 /*
2409  * hl_device_fini - main tear-down function for habanalabs device
2410  *
2411  * @hdev: pointer to habanalabs device structure
2412  *
2413  * Destroy the device, call ASIC fini functions and release the id
2414  */
2415 void hl_device_fini(struct hl_device *hdev)
2416 {
2417 	u32 user_interrupt_cnt;
2418 	bool device_in_reset;
2419 	ktime_t timeout;
2420 	u64 reset_sec;
2421 	int i, rc;
2422 
2423 	dev_info(hdev->dev, "Removing device %s\n", dev_name(&(hdev)->pdev->dev));
2424 
2425 	hdev->device_fini_pending = 1;
2426 	flush_delayed_work(&hdev->device_reset_work.reset_work);
2427 
2428 	if (hdev->pldm)
2429 		reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;
2430 	else
2431 		reset_sec = HL_HARD_RESET_MAX_TIMEOUT;
2432 
2433 	/*
2434 	 * This function competes with the reset function, so try to take the
2435 	 * reset indication, and if a reset is already in progress, wait until
2436 	 * it finishes. The reset function is designed to always finish.
2437 	 * However, in Gaudi, because of all the network ports, a hard reset
2438 	 * could take between 10 and 30 seconds.
2439 	 */
2440 
2441 	timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
2442 
2443 	spin_lock(&hdev->reset_info.lock);
2444 	device_in_reset = !!hdev->reset_info.in_reset;
2445 	if (!device_in_reset)
2446 		hdev->reset_info.in_reset = 1;
2447 	spin_unlock(&hdev->reset_info.lock);
2448 
2449 	while (device_in_reset) {
2450 		usleep_range(50, 200);
2451 
2452 		spin_lock(&hdev->reset_info.lock);
2453 		device_in_reset = !!hdev->reset_info.in_reset;
2454 		if (!device_in_reset)
2455 			hdev->reset_info.in_reset = 1;
2456 		spin_unlock(&hdev->reset_info.lock);
2457 
2458 		if (ktime_compare(ktime_get(), timeout) > 0) {
2459 			dev_crit(hdev->dev,
2460 				"%s Failed to remove device because reset function did not finish\n",
2461 				dev_name(&(hdev)->pdev->dev));
2462 			return;
2463 		}
2464 	}
2465 
2466 	cancel_delayed_work_sync(&hdev->device_release_watchdog_work.reset_work);
2467 
2468 	/* Disable PCI access from the device F/W so it won't send us additional
2469 	 * interrupts. We disable MSI/MSI-X in the halt_engines function and we
2470 	 * can't have the F/W sending us interrupts after that. We need to
2471 	 * disable the access here because once the device is marked disabled,
2472 	 * the message won't be sent. Also, in case of a heartbeat failure, the
2473 	 * device CPU is marked as disabled, so this message wouldn't be sent.
2474 	 */
2475 	hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
2476 
2477 	/* Mark device as disabled */
2478 	hdev->disabled = true;
2479 
2480 	take_release_locks(hdev);
2481 
2482 	hdev->reset_info.hard_reset_pending = true;
2483 
2484 	hl_hwmon_fini(hdev);
2485 
2486 	cleanup_resources(hdev, true, false, false);
2487 
2488 	/* Kill user processes here, after the CS rollback. A process can't
2489 	 * really exit until all of its CSs are done, and completing them is
2490 	 * exactly what the CS rollback does.
2491 	 */
2492 	dev_info(hdev->dev,
2493 		"Waiting for all processes to exit (timeout of %u seconds)\n",
2494 		HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI);
2495 
2496 	hdev->process_kill_trial_cnt = 0;
2497 	rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false);
2498 	if (rc) {
2499 		dev_crit(hdev->dev, "Failed to kill all open processes (%d)\n", rc);
2500 		device_disable_open_processes(hdev, false);
2501 	}
2502 
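	/* Repeat for processes that hold only the control device open */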
2503 	hdev->process_kill_trial_cnt = 0;
2504 	rc = device_kill_open_processes(hdev, 0, true);
2505 	if (rc) {
2506 		dev_crit(hdev->dev, "Failed to kill all control device open processes (%d)\n", rc);
2507 		device_disable_open_processes(hdev, true);
2508 	}
2509 
2510 	hl_cb_pool_fini(hdev);
2511 
2512 	/* Reset the H/W. It will be in idle state after this returns */
2513 	rc = hdev->asic_funcs->hw_fini(hdev, true, false);
2514 	if (rc)
2515 		dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
2516 
2517 	hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
2518 
2519 	/* Release kernel context */
2520 	if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
2521 		dev_err(hdev->dev, "kernel ctx is still alive\n");
2522 
2523 	hl_dec_fini(hdev);
2524 
2525 	hl_vm_fini(hdev);
2526 
2527 	hl_mmu_fini(hdev);
2528 
2529 	vfree(hdev->captured_err_info.page_fault_info.user_mappings);
2530 
2531 	hl_eq_fini(hdev, &hdev->event_queue);
2532 
2533 	kfree(hdev->shadow_cs_queue);
2534 
2535 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2536 		hl_cq_fini(hdev, &hdev->completion_queue[i]);
2537 	kfree(hdev->completion_queue);
2538 
2539 	user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
2540 					hdev->asic_prop.user_interrupt_count;
2541 
2542 	if (user_interrupt_cnt) {
2543 		if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) {
2544 			for (i = 0 ; i < user_interrupt_cnt ; i++)
2545 				vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool);
2546 		}
2547 
2548 		kfree(hdev->user_interrupt);
2549 	}
2550 
2551 	vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool);
2552 
2553 	hl_hw_queues_destroy(hdev);
2554 
2555 	/* Call ASIC S/W finalize function */
2556 	hdev->asic_funcs->sw_fini(hdev);
2557 
2558 	device_early_fini(hdev);
2559 
2560 	/* Hide devices and sysfs/debugfs files from user */
2561 	cdev_sysfs_debugfs_remove(hdev);
2562 	drm_dev_unregister(&hdev->drm);
2563 
2564 	hl_debugfs_device_fini(hdev);
2565 
2566 	pr_info("removed device successfully\n");
2567 }
2568 
2569 /*
2570  * MMIO register access helper functions.
2571  */
2572 
2573 /*
2574  * hl_rreg - Read an MMIO register
2575  *
2576  * @hdev: pointer to habanalabs device structure
2577  * @reg: MMIO register offset (in bytes)
2578  *
2579  * Returns the value of the MMIO register we are asked to read
2580  *
2581  */
2582 inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
2583 {
2584 	u32 val = readl(hdev->rmmio + reg);
2585 
2586 	if (unlikely(trace_habanalabs_rreg32_enabled()))
2587 		trace_habanalabs_rreg32(hdev->dev, reg, val);
2588 
2589 	return val;
2590 }
2591 
2592 /*
2593  * hl_wreg - Write to an MMIO register
2594  *
2595  * @hdev: pointer to habanalabs device structure
2596  * @reg: MMIO register offset (in bytes)
2597  * @val: 32-bit value
2598  *
2599  * Writes the 32-bit value into the MMIO register
2600  *
2601  */
2602 inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
2603 {
2604 	if (unlikely(trace_habanalabs_wreg32_enabled()))
2605 		trace_habanalabs_wreg32(hdev->dev, reg, val);
2606 
2607 	writel(val, hdev->rmmio + reg);
2608 }
2609 
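/*
 * hl_capture_razwi - record the parameters of the first RAZWI event reported
 * since the device was opened: timestamp, address, the possible initiator
 * engines and flags.
 */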
2610 void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
2611 			u8 flags)
2612 {
2613 	struct razwi_info *razwi_info = &hdev->captured_err_info.razwi_info;
2614 
2615 	if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) {
2616 		dev_err(hdev->dev,
2617 				"Number of possible razwi initiators (%u) exceeded limit (%u)\n",
2618 				num_of_engines, HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR);
2619 		return;
2620 	}
2621 
2622 	/* In case it's the first razwi since the device was opened, capture its parameters */
2623 	if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info.razwi_detected, 0, 1))
2624 		return;
2625 
2626 	razwi_info->razwi.timestamp = ktime_to_ns(ktime_get());
2627 	razwi_info->razwi.addr = addr;
2628 	razwi_info->razwi.num_of_possible_engines = num_of_engines;
2629 	memcpy(&razwi_info->razwi.engine_id[0], &engine_id[0],
2630 			num_of_engines * sizeof(u16));
2631 	razwi_info->razwi.flags = flags;
2632 
2633 	razwi_info->razwi_info_available = true;
2634 }
2635 
2636 void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
2637 			u8 flags, u64 *event_mask)
2638 {
2639 	hl_capture_razwi(hdev, addr, engine_id, num_of_engines, flags);
2640 
2641 	if (event_mask)
2642 		*event_mask |= HL_NOTIFIER_EVENT_RAZWI;
2643 }
2644 
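/*
 * hl_capture_user_mappings - snapshot the user mappings of the compute
 * context that are relevant to the faulting MMU (PMMU or device MMU):
 * first count the matching mappings, then allocate a buffer and fill it.
 */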
2645 static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu)
2646 {
2647 	struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
2648 	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
2649 	struct hl_vm_hash_node *hnode;
2650 	struct hl_userptr *userptr;
2651 	enum vm_type *vm_type;
2652 	struct hl_ctx *ctx;
2653 	u32 map_idx = 0;
2654 	int i;
2655 
2656 	/* Reset the count from the previous session */
2657 	pgf_info->num_of_user_mappings = 0;
2658 
2659 	ctx = hl_get_compute_ctx(hdev);
2660 	if (!ctx) {
2661 		dev_err(hdev->dev, "Can't get user context for user mappings\n");
2662 		return;
2663 	}
2664 
2665 	mutex_lock(&ctx->mem_hash_lock);
2666 	hash_for_each(ctx->mem_hash, i, hnode, node) {
2667 		vm_type = hnode->ptr;
2668 		if (((*vm_type == VM_TYPE_USERPTR) && is_pmmu) ||
2669 				((*vm_type == VM_TYPE_PHYS_PACK) && !is_pmmu))
2670 			pgf_info->num_of_user_mappings++;
2672 	}
2673 
2674 	if (!pgf_info->num_of_user_mappings)
2675 		goto finish;
2676 
2677 	/* In case a buffer was already allocated in a previous session, release
2678 	 * it before allocating a new one.
2679 	 */
2680 	vfree(pgf_info->user_mappings);
2681 	pgf_info->user_mappings =
2682 			vzalloc(pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping));
2683 	if (!pgf_info->user_mappings) {
2684 		pgf_info->num_of_user_mappings = 0;
2685 		goto finish;
2686 	}
2687 
2688 	hash_for_each(ctx->mem_hash, i, hnode, node) {
2689 		vm_type = hnode->ptr;
2690 		if ((*vm_type == VM_TYPE_USERPTR) && (is_pmmu)) {
2691 			userptr = hnode->ptr;
2692 			pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
2693 			pgf_info->user_mappings[map_idx].size = userptr->size;
2694 			map_idx++;
2695 		} else if ((*vm_type == VM_TYPE_PHYS_PACK) && (!is_pmmu)) {
2696 			phys_pg_pack = hnode->ptr;
2697 			pgf_info->user_mappings[map_idx].dev_va = hnode->vaddr;
2698 			pgf_info->user_mappings[map_idx].size = phys_pg_pack->total_size;
2699 			map_idx++;
2700 		}
2701 	}
2702 finish:
2703 	mutex_unlock(&ctx->mem_hash_lock);
2704 	hl_ctx_put(ctx);
2705 }
2706 
2707 void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu)
2708 {
2709 	struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
2710 
2711 	/* Capture only the first page fault */
2712 	if (atomic_cmpxchg(&pgf_info->page_fault_detected, 0, 1))
2713 		return;
2714 
2715 	pgf_info->page_fault.timestamp = ktime_to_ns(ktime_get());
2716 	pgf_info->page_fault.addr = addr;
2717 	pgf_info->page_fault.engine_id = eng_id;
2718 	hl_capture_user_mappings(hdev, is_pmmu);
2719 
2720 	pgf_info->page_fault_info_available = true;
2721 }
2722 
2723 void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
2724 				u64 *event_mask)
2725 {
2726 	hl_capture_page_fault(hdev, addr, eng_id, is_pmmu);
2727 
2728 	if (event_mask)
2729 		*event_mask |= HL_NOTIFIER_EVENT_PAGE_FAULT;
2730 }
2731 
2732 static void hl_capture_hw_err(struct hl_device *hdev, u16 event_id)
2733 {
2734 	struct hw_err_info *info = &hdev->captured_err_info.hw_err;
2735 
2736 	/* Capture only the first HW err */
2737 	if (atomic_cmpxchg(&info->event_detected, 0, 1))
2738 		return;
2739 
2740 	info->event.timestamp = ktime_to_ns(ktime_get());
2741 	info->event.event_id = event_id;
2742 
2743 	info->event_info_available = true;
2744 }
2745 
2746 void hl_handle_critical_hw_err(struct hl_device *hdev, u16 event_id, u64 *event_mask)
2747 {
2748 	hl_capture_hw_err(hdev, event_id);
2749 
2750 	if (event_mask)
2751 		*event_mask |= HL_NOTIFIER_EVENT_CRITICL_HW_ERR;
2752 }
2753 
2754 static void hl_capture_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *fw_info)
2755 {
2756 	struct fw_err_info *info = &hdev->captured_err_info.fw_err;
2757 
2758 	/* Capture only the first FW error */
2759 	if (atomic_cmpxchg(&info->event_detected, 0, 1))
2760 		return;
2761 
2762 	info->event.timestamp = ktime_to_ns(ktime_get());
2763 	info->event.err_type = fw_info->err_type;
2764 	if (fw_info->err_type == HL_INFO_FW_REPORTED_ERR)
2765 		info->event.event_id = fw_info->event_id;
2766 
2767 	info->event_info_available = true;
2768 }
2769 
2770 void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info)
2771 {
2772 	hl_capture_fw_err(hdev, info);
2773 
2774 	if (info->event_mask)
2775 		*info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR;
2776 }
2777 
2778 void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count)
2779 {
2780 	struct engine_err_info *info = &hdev->captured_err_info.engine_err;
2781 
2782 	/* Capture only the first engine error */
2783 	if (atomic_cmpxchg(&info->event_detected, 0, 1))
2784 		return;
2785 
2786 	info->event.timestamp = ktime_to_ns(ktime_get());
2787 	info->event.engine_id = engine_id;
2788 	info->event.error_count = error_count;
2789 	info->event_info_available = true;
2790 }
2791 
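/*
 * hl_enable_err_info_capture - clear all previously captured error
 * information and re-arm error capturing; CS-timeout and undefined-opcode
 * capturing are explicitly re-enabled.
 */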
2792 void hl_enable_err_info_capture(struct hl_error_info *captured_err_info)
2793 {
2794 	vfree(captured_err_info->page_fault_info.user_mappings);
2795 	memset(captured_err_info, 0, sizeof(struct hl_error_info));
2796 	atomic_set(&captured_err_info->cs_timeout.write_enable, 1);
2797 	captured_err_info->undef_opcode.write_enable = true;
2798 }
2799