xref: /linux/drivers/accel/habanalabs/common/firmware_if.c (revision 5027ec19f1049a07df5b0a37b1f462514cf2724b)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2016-2022 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #include "habanalabs.h"
9 #include <linux/habanalabs/hl_boot_if.h>
10 
11 #include <linux/firmware.h>
12 #include <linux/crc32.h>
13 #include <linux/slab.h>
14 #include <linux/ctype.h>
15 #include <linux/vmalloc.h>
16 
17 #include <trace/events/habanalabs.h>
18 
19 #define FW_FILE_MAX_SIZE		0x1400000 /* maximum size of 20MB */
20 
/* Human-readable names of the COMMS (LKD <-> F/W boot loader) commands,
 * indexed by command id; used only for log/trace messages.
 */
static char *comms_cmd_str_arr[COMMS_INVLD_LAST] = {
	[COMMS_NOOP] = __stringify(COMMS_NOOP),
	[COMMS_CLR_STS] = __stringify(COMMS_CLR_STS),
	[COMMS_RST_STATE] = __stringify(COMMS_RST_STATE),
	[COMMS_PREP_DESC] = __stringify(COMMS_PREP_DESC),
	[COMMS_DATA_RDY] = __stringify(COMMS_DATA_RDY),
	[COMMS_EXEC] = __stringify(COMMS_EXEC),
	[COMMS_RST_DEV] = __stringify(COMMS_RST_DEV),
	[COMMS_GOTO_WFE] = __stringify(COMMS_GOTO_WFE),
	[COMMS_SKIP_BMC] = __stringify(COMMS_SKIP_BMC),
	[COMMS_PREP_DESC_ELBI] = __stringify(COMMS_PREP_DESC_ELBI),
};
33 
/* Human-readable names of the COMMS status codes reported by the F/W,
 * indexed by status id; used only for log/trace messages.
 */
static char *comms_sts_str_arr[COMMS_STS_INVLD_LAST] = {
	[COMMS_STS_NOOP] = __stringify(COMMS_STS_NOOP),
	[COMMS_STS_ACK] = __stringify(COMMS_STS_ACK),
	[COMMS_STS_OK] = __stringify(COMMS_STS_OK),
	[COMMS_STS_ERR] = __stringify(COMMS_STS_ERR),
	[COMMS_STS_VALID_ERR] = __stringify(COMMS_STS_VALID_ERR),
	[COMMS_STS_TIMEOUT_ERR] = __stringify(COMMS_STS_TIMEOUT_ERR),
};
42 
43 static char *extract_fw_ver_from_str(const char *fw_str)
44 {
45 	char *str, *fw_ver, *whitespace;
46 	u32 ver_offset;
47 
48 	fw_ver = kmalloc(VERSION_MAX_LEN, GFP_KERNEL);
49 	if (!fw_ver)
50 		return NULL;
51 
52 	str = strnstr(fw_str, "fw-", VERSION_MAX_LEN);
53 	if (!str)
54 		goto free_fw_ver;
55 
56 	/* Skip the fw- part */
57 	str += 3;
58 	ver_offset = str - fw_str;
59 
60 	/* Copy until the next whitespace */
61 	whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
62 	if (!whitespace)
63 		goto free_fw_ver;
64 
65 	strscpy(fw_ver, str, whitespace - str + 1);
66 
67 	return fw_ver;
68 
69 free_fw_ver:
70 	kfree(fw_ver);
71 	return NULL;
72 }
73 
74 /**
75  * extract_u32_until_given_char() - given a string of the format "<u32><char>*", extract the u32.
76  * @str: the given string
77  * @ver_num: the pointer to the extracted u32 to be returned to the caller.
78  * @given_char: the given char at the end of the u32 in the string
79  *
80  * Return: Upon success, return a pointer to the given_char in the string. Upon failure, return NULL
81  */
82 static char *extract_u32_until_given_char(char *str, u32 *ver_num, char given_char)
83 {
84 	char num_str[8] = {}, *ch;
85 
86 	ch = strchrnul(str, given_char);
87 	if (*ch == '\0' || ch == str || ch - str >= sizeof(num_str))
88 		return NULL;
89 
90 	memcpy(num_str, str, ch - str);
91 	if (kstrtou32(num_str, 10, ver_num))
92 		return NULL;
93 	return ch;
94 }
95 
/**
 * hl_get_sw_major_minor_subminor() - extract the FW's SW version major, minor, sub-minor
 *				      from the version string
 * @hdev: pointer to the hl_device
 * @fw_str: the FW's version string
 *
 * The extracted version is set in the hdev fields: fw_sw_{major/minor/sub_minor}_ver.
 *
 * fw_str is expected to have one of two possible formats, examples:
 * 1) 'Preboot version hl-gaudi2-1.9.0-fw-42.0.1-sec-3'
 * 2) 'Preboot version hl-gaudi2-1.9.0-rc-fw-42.0.1-sec-3'
 * In those examples, the SW major,minor,subminor are correspondingly: 1,9,0.
 *
 * Return: 0 for success or a negative error code for failure.
 */
static int hl_get_sw_major_minor_subminor(struct hl_device *hdev, const char *fw_str)
{
	char *end, *start;

	/* Prefer the "-rc-" marker (release-candidate builds); the SW version
	 * immediately precedes it.
	 */
	end = strnstr(fw_str, "-rc-", VERSION_MAX_LEN);
	if (end == fw_str)
		return -EINVAL;

	/* No "-rc-": fall back to the "-fw-" marker of release builds */
	if (!end)
		end = strnstr(fw_str, "-fw-", VERSION_MAX_LEN);

	if (end == fw_str)
		return -EINVAL;

	if (!end)
		return -EINVAL;

	/* Walk backwards from the marker to the hyphen that opens the SW
	 * version, e.g. the '-' before "1.9.0" in "...-1.9.0-fw-..."
	 */
	for (start = end - 1; start != fw_str; start--) {
		if (*start == '-')
			break;
	}

	if (start == fw_str)
		return -EINVAL;

	/* start/end point each to the starting and ending hyphen of the sw version e.g. -1.9.0- */
	start++;
	start = extract_u32_until_given_char(start, &hdev->fw_sw_major_ver, '.');
	if (!start)
		goto err_zero_ver;

	start++;
	start = extract_u32_until_given_char(start, &hdev->fw_sw_minor_ver, '.');
	if (!start)
		goto err_zero_ver;

	start++;
	start = extract_u32_until_given_char(start, &hdev->fw_sw_sub_minor_ver, '-');
	if (!start)
		goto err_zero_ver;

	return 0;

err_zero_ver:
	/* On any parse failure, leave a consistent all-zero version */
	hdev->fw_sw_major_ver = 0;
	hdev->fw_sw_minor_ver = 0;
	hdev->fw_sw_sub_minor_ver = 0;
	return -EINVAL;
}
160 
161 /**
162  * hl_get_preboot_major_minor() - extract the FW's version major, minor from the version string.
163  * @hdev: pointer to the hl_device
164  * @preboot_ver: the FW's version string
165  *
166  * preboot_ver is expected to be the format of <major>.<minor>.<sub minor>*, e.g: 42.0.1-sec-3
167  * The extracted version is set in the hdev fields: fw_inner_{major/minor}_ver.
168  *
169  * Return: 0 on success, negative error code for failure.
170  */
171 static int hl_get_preboot_major_minor(struct hl_device *hdev, char *preboot_ver)
172 {
173 	preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_major_ver, '.');
174 	if (!preboot_ver) {
175 		dev_err(hdev->dev, "Error parsing preboot major version\n");
176 		goto err_zero_ver;
177 	}
178 
179 	preboot_ver++;
180 
181 	preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_minor_ver, '.');
182 	if (!preboot_ver) {
183 		dev_err(hdev->dev, "Error parsing preboot minor version\n");
184 		goto err_zero_ver;
185 	}
186 	return 0;
187 
188 err_zero_ver:
189 	hdev->fw_inner_major_ver = 0;
190 	hdev->fw_inner_minor_ver = 0;
191 	return -EINVAL;
192 }
193 
194 static int hl_request_fw(struct hl_device *hdev,
195 				const struct firmware **firmware_p,
196 				const char *fw_name)
197 {
198 	size_t fw_size;
199 	int rc;
200 
201 	rc = request_firmware(firmware_p, fw_name, hdev->dev);
202 	if (rc) {
203 		dev_err(hdev->dev, "Firmware file %s is not found! (error %d)\n",
204 				fw_name, rc);
205 		goto out;
206 	}
207 
208 	fw_size = (*firmware_p)->size;
209 	if ((fw_size % 4) != 0) {
210 		dev_err(hdev->dev, "Illegal %s firmware size %zu\n",
211 				fw_name, fw_size);
212 		rc = -EINVAL;
213 		goto release_fw;
214 	}
215 
216 	dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
217 
218 	if (fw_size > FW_FILE_MAX_SIZE) {
219 		dev_err(hdev->dev,
220 			"FW file size %zu exceeds maximum of %u bytes\n",
221 			fw_size, FW_FILE_MAX_SIZE);
222 		rc = -EINVAL;
223 		goto release_fw;
224 	}
225 
226 	return 0;
227 
228 release_fw:
229 	release_firmware(*firmware_p);
230 out:
231 	return rc;
232 }
233 
/**
 * hl_release_firmware() - release FW
 *
 * @fw: fw descriptor
 *
 * note: this inline function added to serve as a comprehensive mirror for the
 *       hl_request_fw function. It simply forwards to release_firmware().
 */
static inline void hl_release_firmware(const struct firmware *fw)
{
	release_firmware(fw);
}
246 
247 /**
248  * hl_fw_copy_fw_to_device() - copy FW to device
249  *
250  * @hdev: pointer to hl_device structure.
251  * @fw: fw descriptor
252  * @dst: IO memory mapped address space to copy firmware to
253  * @src_offset: offset in src FW to copy from
254  * @size: amount of bytes to copy (0 to copy the whole binary)
255  *
256  * actual copy of FW binary data to device, shared by static and dynamic loaders
257  */
258 static int hl_fw_copy_fw_to_device(struct hl_device *hdev,
259 				const struct firmware *fw, void __iomem *dst,
260 				u32 src_offset, u32 size)
261 {
262 	const void *fw_data;
263 
264 	/* size 0 indicates to copy the whole file */
265 	if (!size)
266 		size = fw->size;
267 
268 	if (src_offset + size > fw->size) {
269 		dev_err(hdev->dev,
270 			"size to copy(%u) and offset(%u) are invalid\n",
271 			size, src_offset);
272 		return -EINVAL;
273 	}
274 
275 	fw_data = (const void *) fw->data;
276 
277 	memcpy_toio(dst, fw_data + src_offset, size);
278 	return 0;
279 }
280 
281 /**
282  * hl_fw_copy_msg_to_device() - copy message to device
283  *
284  * @hdev: pointer to hl_device structure.
285  * @msg: message
286  * @dst: IO memory mapped address space to copy firmware to
287  * @src_offset: offset in src message to copy from
288  * @size: amount of bytes to copy (0 to copy the whole binary)
289  *
290  * actual copy of message data to device.
291  */
292 static int hl_fw_copy_msg_to_device(struct hl_device *hdev,
293 		struct lkd_msg_comms *msg, void __iomem *dst,
294 		u32 src_offset, u32 size)
295 {
296 	void *msg_data;
297 
298 	/* size 0 indicates to copy the whole file */
299 	if (!size)
300 		size = sizeof(struct lkd_msg_comms);
301 
302 	if (src_offset + size > sizeof(struct lkd_msg_comms)) {
303 		dev_err(hdev->dev,
304 			"size to copy(%u) and offset(%u) are invalid\n",
305 			size, src_offset);
306 		return -EINVAL;
307 	}
308 
309 	msg_data = (void *) msg;
310 
311 	memcpy_toio(dst, msg_data + src_offset, size);
312 
313 	return 0;
314 }
315 
316 /**
317  * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
318  *
319  * @hdev: pointer to hl_device structure.
320  * @fw_name: the firmware image name
321  * @dst: IO memory mapped address space to copy firmware to
322  * @src_offset: offset in src FW to copy from
323  * @size: amount of bytes to copy (0 to copy the whole binary)
324  *
325  * Copy fw code from firmware file to device memory.
326  *
327  * Return: 0 on success, non-zero for failure.
328  */
329 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
330 				void __iomem *dst, u32 src_offset, u32 size)
331 {
332 	const struct firmware *fw;
333 	int rc;
334 
335 	rc = hl_request_fw(hdev, &fw, fw_name);
336 	if (rc)
337 		return rc;
338 
339 	rc = hl_fw_copy_fw_to_device(hdev, fw, dst, src_offset, size);
340 
341 	hl_release_firmware(fw);
342 	return rc;
343 }
344 
345 int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
346 {
347 	struct cpucp_packet pkt = {};
348 
349 	pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
350 	pkt.value = cpu_to_le64(value);
351 
352 	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
353 }
354 
/**
 * hl_fw_send_cpu_message() - send a packet to the device CPU over the CPU queue.
 * @hdev: pointer to the habanalabs device structure
 * @hw_queue_id: id of the (CPU) queue to submit on
 * @msg: packet payload, copied into a CPU-accessible DMA buffer
 * @len: payload length in bytes
 * @timeout: timeout (usec) passed to hl_poll_timeout_memory()
 * @result: optional out parameter for the 64-bit result from the F/W,
 *          or the raw F/W error code when the F/W reports a failure
 *
 * Note: if the device is disabled outside of a compute reset, the function
 * returns 0 WITHOUT sending anything and without touching *result.
 *
 * Return: 0 on success, -ENOMEM/-EIO or the poll error on failure.
 */
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
				u16 len, u32 timeout, u64 *result)
{
	struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct cpucp_packet *pkt;
	dma_addr_t pkt_dma_addr;
	struct hl_bd *sent_bd;
	u32 tmp, expected_ack_val, pi, opcode;
	int rc;

	pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
	if (!pkt) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for packet to CPU\n");
		return -ENOMEM;
	}

	memcpy(pkt, msg, len);

	mutex_lock(&hdev->send_cpu_message_lock);

	/* CPU-CP messages can be sent during soft-reset */
	if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
		rc = 0;
		goto out;
	}

	if (hdev->device_cpu_disabled) {
		rc = -EIO;
		goto out;
	}

	/* set fence to a non valid value */
	pkt->fence = cpu_to_le32(UINT_MAX);
	/* Remember the PI before submission - needed later to scrub the BD */
	pi = queue->pi;

	/*
	 * The CPU queue is a synchronous queue with an effective depth of
	 * a single entry (although it is allocated with room for multiple
	 * entries). We lock on it using 'send_cpu_message_lock' which
	 * serializes accesses to the CPU queue.
	 * Which means that we don't need to lock the access to the entire H/W
	 * queues module when submitting a JOB to the CPU queue.
	 */
	hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);

	/* Newer F/W acks with the new PI value; older F/W acks with the
	 * fixed fence value.
	 */
	if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
		expected_ack_val = queue->pi;
	else
		expected_ack_val = CPUCP_PACKET_FENCE_VAL;

	/* Wait for the F/W to overwrite the fence field with the ack value */
	rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
				(tmp == expected_ack_val), 1000,
				timeout, true);

	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);

	if (rc == -ETIMEDOUT) {
		/* If FW performed reset just before sending it a packet, we will get a timeout.
		 * This is expected behavior, hence no need for error message.
		 */
		if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
			dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
					tmp);
		else
			dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n", tmp);
		/* Stop further communication with the device CPU */
		hdev->device_cpu_disabled = true;
		goto out;
	}

	tmp = le32_to_cpu(pkt->ctl);

	/* The F/W return code is embedded in the packet's ctl field */
	rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
	if (rc) {
		opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;

		if (!prop->supports_advanced_cpucp_rc) {
			dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
			rc = -EIO;
			goto scrub_descriptor;
		}

		switch (rc) {
		case cpucp_packet_invalid:
			dev_err(hdev->dev,
				"CPU packet %d is not supported by F/W\n", opcode);
			break;
		case cpucp_packet_fault:
			dev_err(hdev->dev,
				"F/W failed processing CPU packet %d\n", opcode);
			break;
		case cpucp_packet_invalid_pkt:
			dev_dbg(hdev->dev,
				"CPU packet %d is not supported by F/W\n", opcode);
			break;
		case cpucp_packet_invalid_params:
			dev_err(hdev->dev,
				"F/W reports invalid parameters for CPU packet %d\n", opcode);
			break;

		default:
			dev_err(hdev->dev,
				"Unknown F/W ERROR %d for CPU packet %d\n", rc, opcode);
		}

		/* propagate the return code from the f/w to the callers who want to check it */
		if (result)
			*result = rc;

		rc = -EIO;

	} else if (result) {
		*result = le64_to_cpu(pkt->result);
	}

scrub_descriptor:
	/* Scrub previous buffer descriptor 'ctl' field which contains the
	 * previous PI value written during packet submission.
	 * We must do this or else F/W can read an old value upon queue wraparound.
	 */
	sent_bd = queue->kernel_address;
	sent_bd += hl_pi_2_offset(pi);
	sent_bd->ctl = cpu_to_le32(UINT_MAX);

out:
	mutex_unlock(&hdev->send_cpu_message_lock);

	hl_cpu_accessible_dma_pool_free(hdev, len, pkt);

	return rc;
}
487 
488 int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
489 {
490 	struct cpucp_packet pkt;
491 	u64 result;
492 	int rc;
493 
494 	memset(&pkt, 0, sizeof(pkt));
495 
496 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
497 				CPUCP_PKT_CTL_OPCODE_SHIFT);
498 	pkt.value = cpu_to_le64(event_type);
499 
500 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
501 						0, &result);
502 
503 	if (rc)
504 		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
505 
506 	return rc;
507 }
508 
509 int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
510 		size_t irq_arr_size)
511 {
512 	struct cpucp_unmask_irq_arr_packet *pkt;
513 	size_t total_pkt_size;
514 	u64 result;
515 	int rc;
516 
517 	total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
518 			irq_arr_size;
519 
520 	/* data should be aligned to 8 bytes in order to CPU-CP to copy it */
521 	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
522 
523 	/* total_pkt_size is casted to u16 later on */
524 	if (total_pkt_size > USHRT_MAX) {
525 		dev_err(hdev->dev, "too many elements in IRQ array\n");
526 		return -EINVAL;
527 	}
528 
529 	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
530 	if (!pkt)
531 		return -ENOMEM;
532 
533 	pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
534 	memcpy(&pkt->irqs, irq_arr, irq_arr_size);
535 
536 	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
537 						CPUCP_PKT_CTL_OPCODE_SHIFT);
538 
539 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
540 						total_pkt_size, 0, &result);
541 
542 	if (rc)
543 		dev_err(hdev->dev, "failed to unmask IRQ array\n");
544 
545 	kfree(pkt);
546 
547 	return rc;
548 }
549 
550 int hl_fw_test_cpu_queue(struct hl_device *hdev)
551 {
552 	struct cpucp_packet test_pkt = {};
553 	u64 result;
554 	int rc;
555 
556 	test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
557 					CPUCP_PKT_CTL_OPCODE_SHIFT);
558 	test_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
559 
560 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
561 						sizeof(test_pkt), 0, &result);
562 
563 	if (!rc) {
564 		if (result != CPUCP_PACKET_FENCE_VAL)
565 			dev_err(hdev->dev,
566 				"CPU queue test failed (%#08llx)\n", result);
567 	} else {
568 		dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
569 	}
570 
571 	return rc;
572 }
573 
574 void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
575 						dma_addr_t *dma_handle)
576 {
577 	u64 kernel_addr;
578 
579 	kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
580 
581 	*dma_handle = hdev->cpu_accessible_dma_address +
582 		(kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
583 
584 	return (void *) (uintptr_t) kernel_addr;
585 }
586 
587 void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
588 					void *vaddr)
589 {
590 	gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
591 			size);
592 }
593 
594 int hl_fw_send_soft_reset(struct hl_device *hdev)
595 {
596 	struct cpucp_packet pkt;
597 	int rc;
598 
599 	memset(&pkt, 0, sizeof(pkt));
600 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_SOFT_RESET << CPUCP_PKT_CTL_OPCODE_SHIFT);
601 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
602 	if (rc)
603 		dev_err(hdev->dev, "failed to send soft-reset msg (err = %d)\n", rc);
604 
605 	return rc;
606 }
607 
608 int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
609 {
610 	struct cpucp_packet pkt;
611 	int rc;
612 
613 	memset(&pkt, 0, sizeof(pkt));
614 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_ACTIVE_STATUS_SET <<	CPUCP_PKT_CTL_OPCODE_SHIFT);
615 	pkt.value = cpu_to_le64(open);
616 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
617 	if (rc)
618 		dev_err(hdev->dev, "failed to send device activity msg(%u)\n", open);
619 
620 	return rc;
621 }
622 
/* Send a heartbeat (TEST) packet to the device CPU and verify the echoed
 * fence value. Also checks the EQ-fault bit reported by the F/W.
 *
 * Return: 0 if the CPU answered correctly, -EIO otherwise.
 */
int hl_fw_send_heartbeat(struct hl_device *hdev)
{
	struct cpucp_packet hb_pkt;
	u64 result;
	int rc;

	memset(&hb_pkt, 0, sizeof(hb_pkt));
	hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
					CPUCP_PKT_CTL_OPCODE_SHIFT);
	hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
						sizeof(hb_pkt), 0, &result);

	/* Heartbeat is OK only if the F/W echoed the fence value back */
	if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
		return -EIO;

	/* NOTE(review): status_mask is read from the local request packet;
	 * presumably the ASIC's send_cpu_message copies the F/W response
	 * back into the caller's buffer - confirm against the ASIC code.
	 */
	if (le32_to_cpu(hb_pkt.status_mask) &
					CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK) {
		dev_warn(hdev->dev, "FW reported EQ fault during heartbeat\n");
		rc = -EIO;
	}

	return rc;
}
648 
/**
 * fw_report_boot_dev0() - decode and report boot errors latched in ERR0.
 * @hdev: pointer to the habanalabs device structure
 * @err_val: value of the boot ERR0 register
 * @sts_val: value of the boot DEV_STS0 register (logged when valid)
 *
 * Logs one message per set error bit. Bits that are informational or
 * warnings are cleared from @err_val so they do not trip the unknown-error
 * check or the caller's error decision.
 *
 * Return: true if a real error exists AND it is covered by the lower 32 bits
 * of hdev->boot_error_status_mask, false otherwise.
 */
static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
								u32 sts_val)
{
	bool err_exists = false;

	/* The register content is meaningless unless the ENABLED bit is set */
	if (!(err_val & CPU_BOOT_ERR0_ENABLED))
		return false;

	if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) {
		dev_err(hdev->dev,
			"Device boot error - DRAM initialization failed\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) {
		dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) {
		dev_err(hdev->dev,
			"Device boot error - Thermal Sensor initialization failed\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
		/* Only an error when a BMC is expected to be present */
		if (hdev->bmc_enable) {
			dev_err(hdev->dev,
				"Device boot error - Skipped waiting for BMC\n");
			err_exists = true;
		} else {
			dev_info(hdev->dev,
				"Device boot message - Skipped waiting for BMC\n");
			/* This is an info so we don't want it to disable the
			 * device
			 */
			err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED;
		}
	}

	if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) {
		dev_err(hdev->dev,
			"Device boot error - Serdes data from BMC not available\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) {
		dev_err(hdev->dev,
			"Device boot error - NIC F/W initialization failed\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
		dev_err(hdev->dev,
			"Device boot warning - security not ready\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
		dev_err(hdev->dev, "Device boot error - security failure\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) {
		dev_err(hdev->dev, "Device boot error - eFuse failure\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL) {
		dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_PLL_FAIL) {
		dev_err(hdev->dev, "Device boot error - PLL failure\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL) {
		dev_err(hdev->dev, "Device boot error - Failed to set threshold for temperature sensor\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
		/* Ignore this bit, don't prevent driver loading */
		dev_dbg(hdev->dev, "device unusable status is set\n");
		err_val &= ~CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
	}

	if (err_val & CPU_BOOT_ERR0_BINNING_FAIL) {
		dev_err(hdev->dev, "Device boot error - binning failure\n");
		err_exists = true;
	}

	if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
		dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);

	if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
		dev_err(hdev->dev, "Device boot error - EEPROM failure detected\n");
		err_exists = true;
	}

	/* All warnings should go here in order not to reach the unknown error validation */
	if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
		dev_warn(hdev->dev,
			"Device boot warning - Skipped DRAM initialization\n");
		/* This is a warning so we don't want it to disable the
		 * device
		 */
		err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
	}

	if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
		dev_warn(hdev->dev,
			"Device boot warning - Failed to load preboot primary image\n");
		/* This is a warning so we don't want it to disable the
		 * device as we have a secondary preboot image
		 */
		err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
	}

	if (err_val & CPU_BOOT_ERR0_TPM_FAIL) {
		dev_warn(hdev->dev,
			"Device boot warning - TPM failure\n");
		/* This is a warning so we don't want it to disable the
		 * device
		 */
		err_val &= ~CPU_BOOT_ERR0_TPM_FAIL;
	}

	/* Anything left set at this point is a bit this driver doesn't know */
	if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
		dev_err(hdev->dev,
			"Device boot error - unknown ERR0 error 0x%08x\n", err_val);
		err_exists = true;
	}

	/* return error only if it's in the predefined mask */
	if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
				lower_32_bits(hdev->boot_error_status_mask)))
		return true;

	return false;
}
792 
793 /* placeholder for ERR1 as no errors defined there yet */
794 static bool fw_report_boot_dev1(struct hl_device *hdev, u32 err_val,
795 								u32 sts_val)
796 {
797 	/*
798 	 * keep this variable to preserve the logic of the function.
799 	 * this way it would require less modifications when error will be
800 	 * added to DEV_ERR1
801 	 */
802 	bool err_exists = false;
803 
804 	if (!(err_val & CPU_BOOT_ERR1_ENABLED))
805 		return false;
806 
807 	if (sts_val & CPU_BOOT_DEV_STS1_ENABLED)
808 		dev_dbg(hdev->dev, "Device status1 %#x\n", sts_val);
809 
810 	if (!err_exists && (err_val & ~CPU_BOOT_ERR1_ENABLED)) {
811 		dev_err(hdev->dev,
812 			"Device boot error - unknown ERR1 error 0x%08x\n",
813 								err_val);
814 		err_exists = true;
815 	}
816 
817 	/* return error only if it's in the predefined mask */
818 	if (err_exists && ((err_val & ~CPU_BOOT_ERR1_ENABLED) &
819 				upper_32_bits(hdev->boot_error_status_mask)))
820 		return true;
821 
822 	return false;
823 }
824 
825 static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
826 				u32 boot_err1_reg, u32 cpu_boot_dev_status0_reg,
827 				u32 cpu_boot_dev_status1_reg)
828 {
829 	u32 err_val, status_val;
830 	bool err_exists = false;
831 
832 	/* Some of the firmware status codes are deprecated in newer f/w
833 	 * versions. In those versions, the errors are reported
834 	 * in different registers. Therefore, we need to check those
835 	 * registers and print the exact errors. Moreover, there
836 	 * may be multiple errors, so we need to report on each error
837 	 * separately. Some of the error codes might indicate a state
838 	 * that is not an error per-se, but it is an error in production
839 	 * environment
840 	 */
841 	err_val = RREG32(boot_err0_reg);
842 	status_val = RREG32(cpu_boot_dev_status0_reg);
843 	err_exists = fw_report_boot_dev0(hdev, err_val, status_val);
844 
845 	err_val = RREG32(boot_err1_reg);
846 	status_val = RREG32(cpu_boot_dev_status1_reg);
847 	err_exists |= fw_report_boot_dev1(hdev, err_val, status_val);
848 
849 	if (err_exists)
850 		return -EIO;
851 
852 	return 0;
853 }
854 
/**
 * hl_fw_cpucp_info_get() - fetch the CPU-CP info structure from the F/W.
 * @hdev: pointer to the habanalabs device structure
 * @sts_boot_dev_sts0_reg: register holding boot DEV_STS0
 * @sts_boot_dev_sts1_reg: register holding boot DEV_STS1
 * @boot_err0_reg: register holding boot ERR0
 * @boot_err1_reg: register holding boot ERR1
 *
 * Sends an INFO_GET packet with a DMA buffer, copies the returned info into
 * hdev->asic_prop.cpucp_info, builds the hwmon channel info from it, logs
 * the embedded Linux version and re-reads the F/W application security bits.
 *
 * Return: 0 on success, negative error code on failure.
 */
int hl_fw_cpucp_info_get(struct hl_device *hdev,
				u32 sts_boot_dev_sts0_reg,
				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
				u32 boot_err1_reg)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct cpucp_packet pkt = {};
	dma_addr_t cpucp_info_dma_addr;
	void *cpucp_info_cpu_addr;
	char *kernel_ver;
	u64 result;
	int rc;

	/* The F/W fills the info structure via DMA, so it must live in the
	 * CPU-accessible DMA pool.
	 */
	cpucp_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, sizeof(struct cpucp_info),
								&cpucp_info_dma_addr);
	if (!cpucp_info_cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for CPU-CP info packet\n");
		return -ENOMEM;
	}

	memset(cpucp_info_cpu_addr, 0, sizeof(struct cpucp_info));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_INFO_GET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.addr = cpu_to_le64(cpucp_info_dma_addr);
	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_info));

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP info pkt, error %d\n", rc);
		goto out;
	}

	rc = fw_read_errors(hdev, boot_err0_reg, boot_err1_reg,
				sts_boot_dev_sts0_reg, sts_boot_dev_sts1_reg);
	if (rc) {
		dev_err(hdev->dev, "Errors in device boot\n");
		goto out;
	}

	memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
			sizeof(prop->cpucp_info));

	rc = hl_build_hwmon_channel_info(hdev, prop->cpucp_info.sensors);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to build hwmon channel info, error %d\n", rc);
		rc = -EFAULT;
		goto out;
	}

	/* Best-effort log of the embedded Linux version; failure to extract
	 * it is not an error.
	 */
	kernel_ver = extract_fw_ver_from_str(prop->cpucp_info.kernel_version);
	if (kernel_ver) {
		dev_info(hdev->dev, "Linux version %s", kernel_ver);
		kfree(kernel_ver);
	}

	/* assume EQ code doesn't need to check eqe index */
	hdev->event_queue.check_eqe_index = false;

	/* Read FW application security bits again */
	if (prop->fw_cpu_boot_dev_sts0_valid) {
		prop->fw_app_cpu_boot_dev_sts0 = RREG32(sts_boot_dev_sts0_reg);
		if (prop->fw_app_cpu_boot_dev_sts0 &
				CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
			hdev->event_queue.check_eqe_index = true;
	}

	if (prop->fw_cpu_boot_dev_sts1_valid)
		prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);

out:
	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr);

	return rc;
}
934 
/* hl_fw_send_msi_info_msg() - send the host MSI mapping table to the F/W.
 *
 * ASICs without a get_msi_info callback don't support this and are skipped.
 * A F/W that rejects the packet as invalid simply falls back to its default
 * MSI values, so that case is not treated as an error.
 *
 * Return: 0 on success (or when the feature is unsupported), negative error
 * code otherwise.
 */
static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
{
	struct cpucp_array_data_packet *pkt;
	size_t total_pkt_size, data_size;
	u64 result;
	int rc;

	/* skip sending this info for unsupported ASICs */
	if (!hdev->asic_funcs->get_msi_info)
		return 0;

	data_size = CPUCP_NUM_OF_MSI_TYPES * sizeof(u32);
	total_pkt_size = sizeof(struct cpucp_array_data_packet) + data_size;

	/* data should be aligned to 8 bytes in order to CPU-CP to copy it */
	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;

	/* total_pkt_size is casted to u16 later on */
	if (total_pkt_size > USHRT_MAX) {
		dev_err(hdev->dev, "CPUCP array data is too big\n");
		return -EINVAL;
	}

	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	pkt->length = cpu_to_le32(CPUCP_NUM_OF_MSI_TYPES);

	/* Pre-fill with 0xFF so unused MSI slots are clearly marked invalid */
	memset((void *) &pkt->data, 0xFF, data_size);
	hdev->asic_funcs->get_msi_info(pkt->data);

	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_MSI_INFO_SET <<
						CPUCP_PKT_CTL_OPCODE_SHIFT);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)pkt,
						total_pkt_size, 0, &result);

	/*
	 * in case packet result is invalid it means that FW does not support
	 * this feature and will use default/hard coded MSI values. no reason
	 * to stop the boot
	 *
	 * NOTE(review): if send_cpu_message fails before the F/W replies
	 * (e.g. -ENOMEM), 'result' may be left unmodified here - confirm
	 * whether that path can reach this comparison.
	 */
	if (rc && result == cpucp_packet_invalid)
		rc = 0;

	if (rc)
		dev_err(hdev->dev, "failed to send CPUCP array data\n");

	kfree(pkt);

	return rc;
}
988 
989 int hl_fw_cpucp_handshake(struct hl_device *hdev,
990 				u32 sts_boot_dev_sts0_reg,
991 				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
992 				u32 boot_err1_reg)
993 {
994 	int rc;
995 
996 	rc = hl_fw_cpucp_info_get(hdev, sts_boot_dev_sts0_reg,
997 					sts_boot_dev_sts1_reg, boot_err0_reg,
998 					boot_err1_reg);
999 	if (rc)
1000 		return rc;
1001 
1002 	return hl_fw_send_msi_info_msg(hdev);
1003 }
1004 
1005 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
1006 {
1007 	struct cpucp_packet pkt = {};
1008 	void *eeprom_info_cpu_addr;
1009 	dma_addr_t eeprom_info_dma_addr;
1010 	u64 result;
1011 	int rc;
1012 
1013 	eeprom_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, max_size,
1014 									&eeprom_info_dma_addr);
1015 	if (!eeprom_info_cpu_addr) {
1016 		dev_err(hdev->dev,
1017 			"Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
1018 		return -ENOMEM;
1019 	}
1020 
1021 	memset(eeprom_info_cpu_addr, 0, max_size);
1022 
1023 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_EEPROM_DATA_GET <<
1024 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1025 	pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
1026 	pkt.data_max_size = cpu_to_le32(max_size);
1027 
1028 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1029 			HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);
1030 
1031 	if (rc) {
1032 		dev_err(hdev->dev,
1033 			"Failed to handle CPU-CP EEPROM packet, error %d\n",
1034 			rc);
1035 		goto out;
1036 	}
1037 
1038 	/* result contains the actual size */
1039 	memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
1040 
1041 out:
1042 	hl_cpu_accessible_dma_pool_free(hdev, max_size, eeprom_info_cpu_addr);
1043 
1044 	return rc;
1045 }
1046 
1047 int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
1048 {
1049 	struct cpucp_monitor_dump *mon_dump_cpu_addr;
1050 	dma_addr_t mon_dump_dma_addr;
1051 	struct cpucp_packet pkt = {};
1052 	size_t data_size;
1053 	__le32 *src_ptr;
1054 	u32 *dst_ptr;
1055 	u64 result;
1056 	int i, rc;
1057 
1058 	data_size = sizeof(struct cpucp_monitor_dump);
1059 	mon_dump_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, data_size, &mon_dump_dma_addr);
1060 	if (!mon_dump_cpu_addr) {
1061 		dev_err(hdev->dev,
1062 			"Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
1063 		return -ENOMEM;
1064 	}
1065 
1066 	memset(mon_dump_cpu_addr, 0, data_size);
1067 
1068 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MONITOR_DUMP_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
1069 	pkt.addr = cpu_to_le64(mon_dump_dma_addr);
1070 	pkt.data_max_size = cpu_to_le32(data_size);
1071 
1072 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1073 							HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
1074 	if (rc) {
1075 		dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
1076 		goto out;
1077 	}
1078 
1079 	/* result contains the actual size */
1080 	src_ptr = (__le32 *) mon_dump_cpu_addr;
1081 	dst_ptr = data;
1082 	for (i = 0; i < (data_size / sizeof(u32)); i++) {
1083 		*dst_ptr = le32_to_cpu(*src_ptr);
1084 		src_ptr++;
1085 		dst_ptr++;
1086 	}
1087 
1088 out:
1089 	hl_cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
1090 
1091 	return rc;
1092 }
1093 
1094 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
1095 		struct hl_info_pci_counters *counters)
1096 {
1097 	struct cpucp_packet pkt = {};
1098 	u64 result;
1099 	int rc;
1100 
1101 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
1102 			CPUCP_PKT_CTL_OPCODE_SHIFT);
1103 
1104 	/* Fetch PCI rx counter */
1105 	pkt.index = cpu_to_le32(cpucp_pcie_throughput_rx);
1106 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1107 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1108 	if (rc) {
1109 		dev_err(hdev->dev,
1110 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1111 		return rc;
1112 	}
1113 	counters->rx_throughput = result;
1114 
1115 	memset(&pkt, 0, sizeof(pkt));
1116 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
1117 			CPUCP_PKT_CTL_OPCODE_SHIFT);
1118 
1119 	/* Fetch PCI tx counter */
1120 	pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
1121 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1122 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1123 	if (rc) {
1124 		dev_err(hdev->dev,
1125 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1126 		return rc;
1127 	}
1128 	counters->tx_throughput = result;
1129 
1130 	/* Fetch PCI replay counter */
1131 	memset(&pkt, 0, sizeof(pkt));
1132 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
1133 			CPUCP_PKT_CTL_OPCODE_SHIFT);
1134 
1135 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1136 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1137 	if (rc) {
1138 		dev_err(hdev->dev,
1139 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1140 		return rc;
1141 	}
1142 	counters->replay_cnt = (u32) result;
1143 
1144 	return rc;
1145 }
1146 
1147 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
1148 {
1149 	struct cpucp_packet pkt = {};
1150 	u64 result;
1151 	int rc;
1152 
1153 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET <<
1154 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1155 
1156 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1157 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1158 	if (rc) {
1159 		dev_err(hdev->dev,
1160 			"Failed to handle CpuCP total energy pkt, error %d\n",
1161 				rc);
1162 		return rc;
1163 	}
1164 
1165 	*total_energy = result;
1166 
1167 	return rc;
1168 }
1169 
/**
 * get_used_pll_index() - translate a driver PLL index into the index the FW
 *                        expects, validating it against the FW's PLL map.
 *
 * @hdev: pointer to the habanalabs device structure
 * @input_pll_index: driver (ASIC-specific) PLL index requested by the caller
 * @pll_index: output - the PLL index to pass to the FW
 *
 * With legacy FW (dynamic-PLL bit not set in fw_app_cpu_boot_dev_sts0) the
 * driver index is used as-is. Otherwise the index is remapped via the ASIC
 * callback and checked against the per-bit PLL support map reported by the FW.
 *
 * @return 0 on success, -EINVAL if the index cannot be mapped or the PLL is
 *         not supported by the FW
 */
int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
						enum pll_index *pll_index)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u8 pll_byte, pll_bit_off;
	bool dynamic_pll;
	int fw_pll_idx;

	dynamic_pll = !!(prop->fw_app_cpu_boot_dev_sts0 &
						CPU_BOOT_DEV_STS0_DYN_PLL_EN);

	if (!dynamic_pll) {
		/*
		 * in case we are working with legacy FW (each asic has unique
		 * PLL numbering) use the driver based index as they are
		 * aligned with fw legacy numbering
		 */
		*pll_index = input_pll_index;
		return 0;
	}

	/* retrieve a FW compatible PLL index based on
	 * ASIC specific user request
	 */
	fw_pll_idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(input_pll_index);
	if (fw_pll_idx < 0) {
		dev_err(hdev->dev, "Invalid PLL index (%u) error %d\n",
			input_pll_index, fw_pll_idx);
		return -EINVAL;
	}

	/* PLL map is a u8 array - locate the byte and bit for this index */
	pll_byte = prop->cpucp_info.pll_map[fw_pll_idx >> 3];
	pll_bit_off = fw_pll_idx & 0x7;

	/* a cleared bit means the FW does not expose this PLL */
	if (!(pll_byte & BIT(pll_bit_off))) {
		dev_err(hdev->dev, "PLL index %d is not supported\n",
			fw_pll_idx);
		return -EINVAL;
	}

	*pll_index = fw_pll_idx;

	return 0;
}
1215 
1216 int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
1217 		u16 *pll_freq_arr)
1218 {
1219 	struct cpucp_packet pkt;
1220 	enum pll_index used_pll_idx;
1221 	u64 result;
1222 	int rc;
1223 
1224 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
1225 	if (rc)
1226 		return rc;
1227 
1228 	memset(&pkt, 0, sizeof(pkt));
1229 
1230 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET <<
1231 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1232 	pkt.pll_type = __cpu_to_le16((u16)used_pll_idx);
1233 
1234 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1235 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1236 	if (rc) {
1237 		dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
1238 		return rc;
1239 	}
1240 
1241 	pll_freq_arr[0] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT0_MASK, result);
1242 	pll_freq_arr[1] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT1_MASK, result);
1243 	pll_freq_arr[2] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT2_MASK, result);
1244 	pll_freq_arr[3] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT3_MASK, result);
1245 
1246 	return 0;
1247 }
1248 
1249 int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
1250 {
1251 	struct cpucp_packet pkt;
1252 	u64 result;
1253 	int rc;
1254 
1255 	memset(&pkt, 0, sizeof(pkt));
1256 
1257 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
1258 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1259 	pkt.type = cpu_to_le16(CPUCP_POWER_INPUT);
1260 
1261 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1262 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1263 	if (rc) {
1264 		dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
1265 		return rc;
1266 	}
1267 
1268 	*power = result;
1269 
1270 	return rc;
1271 }
1272 
1273 int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
1274 				struct cpucp_hbm_row_info *info)
1275 {
1276 	struct cpucp_hbm_row_info *cpucp_repl_rows_info_cpu_addr;
1277 	dma_addr_t cpucp_repl_rows_info_dma_addr;
1278 	struct cpucp_packet pkt = {};
1279 	u64 result;
1280 	int rc;
1281 
1282 	cpucp_repl_rows_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev,
1283 							sizeof(struct cpucp_hbm_row_info),
1284 							&cpucp_repl_rows_info_dma_addr);
1285 	if (!cpucp_repl_rows_info_cpu_addr) {
1286 		dev_err(hdev->dev,
1287 			"Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
1288 		return -ENOMEM;
1289 	}
1290 
1291 	memset(cpucp_repl_rows_info_cpu_addr, 0, sizeof(struct cpucp_hbm_row_info));
1292 
1293 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET <<
1294 					CPUCP_PKT_CTL_OPCODE_SHIFT);
1295 	pkt.addr = cpu_to_le64(cpucp_repl_rows_info_dma_addr);
1296 	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_hbm_row_info));
1297 
1298 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1299 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1300 	if (rc) {
1301 		dev_err(hdev->dev,
1302 			"Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
1303 		goto out;
1304 	}
1305 
1306 	memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));
1307 
1308 out:
1309 	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_hbm_row_info),
1310 						cpucp_repl_rows_info_cpu_addr);
1311 
1312 	return rc;
1313 }
1314 
1315 int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)
1316 {
1317 	struct cpucp_packet pkt;
1318 	u64 result;
1319 	int rc;
1320 
1321 	memset(&pkt, 0, sizeof(pkt));
1322 
1323 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_PENDING_ROWS_STATUS << CPUCP_PKT_CTL_OPCODE_SHIFT);
1324 
1325 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
1326 	if (rc) {
1327 		dev_err(hdev->dev,
1328 				"Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
1329 		goto out;
1330 	}
1331 
1332 	*pend_rows_num = (u32) result;
1333 out:
1334 	return rc;
1335 }
1336 
1337 int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid)
1338 {
1339 	struct cpucp_packet pkt;
1340 	int rc;
1341 
1342 	memset(&pkt, 0, sizeof(pkt));
1343 
1344 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_ENGINE_CORE_ASID_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
1345 	pkt.value = cpu_to_le64(asid);
1346 
1347 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1348 						HL_CPUCP_INFO_TIMEOUT_USEC, NULL);
1349 	if (rc)
1350 		dev_err(hdev->dev,
1351 			"Failed on ASID configuration request for engine core, error %d\n",
1352 			rc);
1353 
1354 	return rc;
1355 }
1356 
1357 void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
1358 {
1359 	struct static_fw_load_mgr *static_loader =
1360 			&hdev->fw_loader.static_loader;
1361 	int rc;
1362 
1363 	if (hdev->asic_prop.dynamic_fw_load) {
1364 		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
1365 				COMMS_RST_DEV, 0, false,
1366 				hdev->fw_loader.cpu_timeout);
1367 		if (rc)
1368 			dev_err(hdev->dev, "Failed sending COMMS_RST_DEV\n");
1369 	} else {
1370 		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_RST_DEV);
1371 	}
1372 }
1373 
/*
 * hl_fw_ask_halt_machine_without_linux() - halt the device CPU when the
 * Linux FW image is not running on it.
 *
 * With dynamic FW load, a COMMS_GOTO_WFE command is sent and the boot status
 * register is polled until the CPU reports it entered WFE. With static load,
 * the halt request is written to the KMD message register and the register
 * is cleared afterwards. The halt is performed at most once per device
 * lifetime (tracked by hdev->device_cpu_is_halted).
 */
void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
{
	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
	u32 status, cpu_boot_status_reg, cpu_timeout;
	struct static_fw_load_mgr *static_loader;
	struct pre_fw_load_props *pre_fw_load;
	int rc;

	/* already halted - nothing to do */
	if (hdev->device_cpu_is_halted)
		return;

	/* Stop device CPU to make sure nothing bad happens */
	if (hdev->asic_prop.dynamic_fw_load) {
		pre_fw_load = &fw_loader->pre_fw_load;
		cpu_timeout = fw_loader->cpu_timeout;
		cpu_boot_status_reg = pre_fw_load->cpu_boot_status_reg;

		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
				COMMS_GOTO_WFE, 0, false, cpu_timeout);
		if (rc) {
			dev_err(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
		} else {
			/* wait until the CPU confirms it is parked in WFE */
			rc = hl_poll_timeout(
				hdev,
				cpu_boot_status_reg,
				status,
				status == CPU_BOOT_STATUS_IN_WFE,
				hdev->fw_poll_interval_usec,
				cpu_timeout);
			if (rc)
				dev_err(hdev->dev, "Current status=%u. Timed-out updating to WFE\n",
						status);
		}
	} else {
		static_loader = &hdev->fw_loader.static_loader;
		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
		msleep(static_loader->cpu_reset_wait_msec);

		/* Must clear this register in order to prevent preboot
		 * from reading WFE after reboot
		 */
		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_NA);
	}

	hdev->device_cpu_is_halted = true;
}
1420 
/**
 * detect_cpu_boot_status() - print a human-readable description of a raw
 *                            CPU boot status code.
 *
 * @hdev: pointer to the habanalabs device structure
 * @status: value read from the CPU boot status register
 *
 * Used on boot failure to tell the user at which stage the device CPU got
 * stuck. Unknown codes fall through to a generic message.
 */
static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
{
	/* Some of the status codes below are deprecated in newer f/w
	 * versions but we keep them here for backward compatibility
	 */
	switch (status) {
	case CPU_BOOT_STATUS_NA:
		dev_err(hdev->dev,
			"Device boot progress - BTL/ROM did NOT run\n");
		break;
	case CPU_BOOT_STATUS_IN_WFE:
		dev_err(hdev->dev,
			"Device boot progress - Stuck inside WFE loop\n");
		break;
	case CPU_BOOT_STATUS_IN_BTL:
		dev_err(hdev->dev,
			"Device boot progress - Stuck in BTL\n");
		break;
	case CPU_BOOT_STATUS_IN_PREBOOT:
		dev_err(hdev->dev,
			"Device boot progress - Stuck in Preboot\n");
		break;
	case CPU_BOOT_STATUS_IN_SPL:
		dev_err(hdev->dev,
			"Device boot progress - Stuck in SPL\n");
		break;
	case CPU_BOOT_STATUS_IN_UBOOT:
		dev_err(hdev->dev,
			"Device boot progress - Stuck in u-boot\n");
		break;
	case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
		dev_err(hdev->dev,
			"Device boot progress - DRAM initialization failed\n");
		break;
	case CPU_BOOT_STATUS_UBOOT_NOT_READY:
		dev_err(hdev->dev,
			"Device boot progress - Cannot boot\n");
		break;
	case CPU_BOOT_STATUS_TS_INIT_FAIL:
		dev_err(hdev->dev,
			"Device boot progress - Thermal Sensor initialization failed\n");
		break;
	case CPU_BOOT_STATUS_SECURITY_READY:
		dev_err(hdev->dev,
			"Device boot progress - Stuck in preboot after security initialization\n");
		break;
	case CPU_BOOT_STATUS_FW_SHUTDOWN_PREP:
		dev_err(hdev->dev,
			"Device boot progress - Stuck in preparation for shutdown\n");
		break;
	default:
		dev_err(hdev->dev,
			"Device boot progress - Invalid or unexpected status code %d\n", status);
		break;
	}
}
1477 
/**
 * hl_fw_wait_preboot_ready() - poll the boot status register until preboot
 *                              reports it is ready.
 *
 * @hdev: pointer to the habanalabs device structure
 *
 * Polls for one of the "ready" status codes. If the first wait times out
 * while preboot still appears to be running, a single retry with the
 * extended timeout is performed (when configured). On failure the boot
 * status is decoded and the FW error registers are dumped.
 *
 * @return 0 on success, -EIO on timeout
 */
int hl_fw_wait_preboot_ready(struct hl_device *hdev)
{
	struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
	u32 status = 0, timeout;
	int rc, tries = 1;
	bool preboot_still_runs;

	/* Need to check two possible scenarios:
	 *
	 * CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT - for newer firmwares where
	 * the preboot is waiting for the boot fit
	 *
	 * All other status values - for older firmwares where the uboot was
	 * loaded from the FLASH
	 */
	timeout = pre_fw_load->wait_for_preboot_timeout;
retry:
	rc = hl_poll_timeout(
		hdev,
		pre_fw_load->cpu_boot_status_reg,
		status,
		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
		(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
		hdev->fw_poll_interval_usec,
		timeout);
	/*
	 * if F/W reports "security-ready" it means preboot might take longer.
	 * If the field 'wait_for_preboot_extended_timeout' is non 0 we wait again
	 * with that timeout
	 */
	preboot_still_runs = (status == CPU_BOOT_STATUS_SECURITY_READY ||
				status == CPU_BOOT_STATUS_IN_PREBOOT ||
				status == CPU_BOOT_STATUS_FW_SHUTDOWN_PREP ||
				status == CPU_BOOT_STATUS_DRAM_RDY);

	/* at most one retry - 'tries' starts at 1 and is decremented here */
	if (rc && tries && preboot_still_runs) {
		tries--;
		if (pre_fw_load->wait_for_preboot_extended_timeout) {
			timeout = pre_fw_load->wait_for_preboot_extended_timeout;
			goto retry;
		}
	}

	if (rc) {
		detect_cpu_boot_status(hdev, status);
		dev_err(hdev->dev, "CPU boot ready timeout (status = %d)\n", status);

		/* If we read all FF, then something is totally wrong, no point
		 * of reading specific errors
		 */
		if (status != -1)
			fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
						pre_fw_load->boot_err1_reg,
						pre_fw_load->sts_boot_dev_sts0_reg,
						pre_fw_load->sts_boot_dev_sts1_reg);
		return -EIO;
	}

	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_PREBOOT_CPU;

	return 0;
}
1541 
/**
 * hl_fw_read_preboot_caps() - wait for preboot and latch the FW
 *                             capability registers.
 *
 * @hdev: pointer to the habanalabs device structure
 *
 * After preboot is ready, reads the DEV_STS0/1 registers and, when their
 * ENABLED bit is set, stores the values and marks them valid. The
 * FW_LD_COM_EN capability bit decides whether the dynamic (COMMS) or legacy
 * (static) FW load protocol is used, after which the ASIC FW loader is
 * initialized accordingly.
 *
 * @return 0 on success, otherwise non-zero error code
 */
static int hl_fw_read_preboot_caps(struct hl_device *hdev)
{
	struct pre_fw_load_props *pre_fw_load;
	struct asic_fixed_properties *prop;
	u32 reg_val;
	int rc;

	prop = &hdev->asic_prop;
	pre_fw_load = &hdev->fw_loader.pre_fw_load;

	rc = hl_fw_wait_preboot_ready(hdev);
	if (rc)
		return rc;

	/*
	 * the registers DEV_STS* contain FW capabilities/features.
	 * We can rely on this registers only if bit CPU_BOOT_DEV_STS*_ENABLED
	 * is set.
	 * In the first read of this register we store the value of this
	 * register ONLY if the register is enabled (which will be propagated
	 * to next stages) and also mark the register as valid.
	 * In case it is not enabled the stored value will be left 0- all
	 * caps/features are off
	 */
	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts0_reg);
	if (reg_val & CPU_BOOT_DEV_STS0_ENABLED) {
		prop->fw_cpu_boot_dev_sts0_valid = true;
		prop->fw_preboot_cpu_boot_dev_sts0 = reg_val;
	}

	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts1_reg);
	if (reg_val & CPU_BOOT_DEV_STS1_ENABLED) {
		prop->fw_cpu_boot_dev_sts1_valid = true;
		prop->fw_preboot_cpu_boot_dev_sts1 = reg_val;
	}

	/* dynamic (COMMS) load is used when the FW advertises it */
	prop->dynamic_fw_load = !!(prop->fw_preboot_cpu_boot_dev_sts0 &
						CPU_BOOT_DEV_STS0_FW_LD_COM_EN);

	/* initialize FW loader once we know what load protocol is used */
	hdev->asic_funcs->init_firmware_loader(hdev);

	dev_dbg(hdev->dev, "Attempting %s FW load\n",
			prop->dynamic_fw_load ? "dynamic" : "legacy");
	return 0;
}
1588 
/**
 * hl_fw_static_read_device_fw_version() - read a FW component version string
 *                                         from device SRAM (static load).
 *
 * @hdev: pointer to the habanalabs device structure
 * @fwc: which FW component to read (boot-fit or preboot)
 *
 * The version offset is read from a component-specific register, masked to
 * an SRAM offset and bounds-checked, then the version string is copied from
 * the SRAM BAR into the device properties. The "fw-..." version substring is
 * extracted and printed; for preboot the BTL version preceding the "Preboot"
 * marker is printed as well.
 *
 * @return 0 on success, -EIO on unknown component or out-of-bounds offset
 */
static int hl_fw_static_read_device_fw_version(struct hl_device *hdev,
					enum hl_fw_component fwc)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
	struct static_fw_load_mgr *static_loader;
	char *dest, *boot_ver, *preboot_ver;
	u32 ver_off, limit;
	const char *name;
	char btl_ver[32];

	static_loader = &hdev->fw_loader.static_loader;

	switch (fwc) {
	case FW_COMP_BOOT_FIT:
		ver_off = RREG32(static_loader->boot_fit_version_offset_reg);
		dest = prop->uboot_ver;
		name = "Boot-fit";
		limit = static_loader->boot_fit_version_max_off;
		break;
	case FW_COMP_PREBOOT:
		ver_off = RREG32(static_loader->preboot_version_offset_reg);
		dest = prop->preboot_ver;
		name = "Preboot";
		limit = static_loader->preboot_version_max_off;
		break;
	default:
		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
		return -EIO;
	}

	/* the register holds more than the SRAM offset - mask it out */
	ver_off &= static_loader->sram_offset_mask;

	if (ver_off < limit) {
		memcpy_fromio(dest,
			hdev->pcie_bar[fw_loader->sram_bar_id] + ver_off,
			VERSION_MAX_LEN);
	} else {
		dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
								name, ver_off);
		strscpy(dest, "unavailable", VERSION_MAX_LEN);
		return -EIO;
	}

	if (fwc == FW_COMP_BOOT_FIT) {
		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
		if (boot_ver) {
			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
			kfree(boot_ver);
		}
	} else if (fwc == FW_COMP_PREBOOT) {
		preboot_ver = strnstr(prop->preboot_ver, "Preboot",
						VERSION_MAX_LEN);
		if (preboot_ver && preboot_ver != prop->preboot_ver) {
			/* NOTE(review): strscpy copies at most size-1 chars,
			 * so the last char before "Preboot" is dropped here -
			 * presumably a separator, but confirm intent
			 */
			strscpy(btl_ver, prop->preboot_ver,
				min((int) (preboot_ver - prop->preboot_ver),
									31));
			dev_info(hdev->dev, "%s\n", btl_ver);
		}

		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
		if (preboot_ver) {
			dev_info(hdev->dev, "preboot version %s\n",
								preboot_ver);
			kfree(preboot_ver);
		}
	}

	return 0;
}
1659 
1660 /**
1661  * hl_fw_preboot_update_state - update internal data structures during
1662  *                              handshake with preboot
1663  *
1664  *
1665  * @hdev: pointer to the habanalabs device structure
1666  *
1667  * @return 0 on success, otherwise non-zero error code
1668  */
1669 static void hl_fw_preboot_update_state(struct hl_device *hdev)
1670 {
1671 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1672 	u32 cpu_boot_dev_sts0, cpu_boot_dev_sts1;
1673 
1674 	cpu_boot_dev_sts0 = prop->fw_preboot_cpu_boot_dev_sts0;
1675 	cpu_boot_dev_sts1 = prop->fw_preboot_cpu_boot_dev_sts1;
1676 
1677 	/* We read boot_dev_sts registers multiple times during boot:
1678 	 * 1. preboot - a. Check whether the security status bits are valid
1679 	 *              b. Check whether fw security is enabled
1680 	 *              c. Check whether hard reset is done by preboot
1681 	 * 2. boot cpu - a. Fetch boot cpu security status
1682 	 *               b. Check whether hard reset is done by boot cpu
1683 	 * 3. FW application - a. Fetch fw application security status
1684 	 *                     b. Check whether hard reset is done by fw app
1685 	 */
1686 	prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
1687 
1688 	prop->fw_security_enabled = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_SECURITY_EN);
1689 
1690 	dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
1691 							cpu_boot_dev_sts0);
1692 
1693 	dev_dbg(hdev->dev, "Firmware preboot boot device status1 %#x\n",
1694 							cpu_boot_dev_sts1);
1695 
1696 	dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
1697 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
1698 
1699 	dev_dbg(hdev->dev, "firmware-level security is %s\n",
1700 			prop->fw_security_enabled ? "enabled" : "disabled");
1701 
1702 	dev_dbg(hdev->dev, "GIC controller is %s\n",
1703 			prop->gic_interrupts_enable ? "enabled" : "disabled");
1704 }
1705 
1706 static int hl_fw_static_read_preboot_status(struct hl_device *hdev)
1707 {
1708 	int rc;
1709 
1710 	rc = hl_fw_static_read_device_fw_version(hdev, FW_COMP_PREBOOT);
1711 	if (rc)
1712 		return rc;
1713 
1714 	return 0;
1715 }
1716 
1717 int hl_fw_read_preboot_status(struct hl_device *hdev)
1718 {
1719 	int rc;
1720 
1721 	if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
1722 		return 0;
1723 
1724 	/* get FW pre-load parameters  */
1725 	hdev->asic_funcs->init_firmware_preload_params(hdev);
1726 
1727 	/*
1728 	 * In order to determine boot method (static VS dynamic) we need to
1729 	 * read the boot caps register
1730 	 */
1731 	rc = hl_fw_read_preboot_caps(hdev);
1732 	if (rc)
1733 		return rc;
1734 
1735 	hl_fw_preboot_update_state(hdev);
1736 
1737 	/* no need to read preboot status in dynamic load */
1738 	if (hdev->asic_prop.dynamic_fw_load)
1739 		return 0;
1740 
1741 	return hl_fw_static_read_preboot_status(hdev);
1742 }
1743 
/* printable names for COMMS status codes (indexed by enum comms_sts) */
static char *hl_dynamic_fw_status_str[COMMS_STS_INVLD_LAST] = {
	[COMMS_STS_NOOP] = "NOOP",
	[COMMS_STS_ACK] = "ACK",
	[COMMS_STS_OK] = "OK",
	[COMMS_STS_ERR] = "ERR",
	[COMMS_STS_VALID_ERR] = "VALID_ERR",
	[COMMS_STS_TIMEOUT_ERR] = "TIMEOUT_ERR",
};
1753 
1754 /**
1755  * hl_fw_dynamic_report_error_status - report error status
1756  *
1757  * @hdev: pointer to the habanalabs device structure
1758  * @status: value of FW status register
1759  * @expected_status: the expected status
1760  */
1761 static void hl_fw_dynamic_report_error_status(struct hl_device *hdev,
1762 						u32 status,
1763 						enum comms_sts expected_status)
1764 {
1765 	enum comms_sts comm_status =
1766 				FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
1767 
1768 	if (comm_status < COMMS_STS_INVLD_LAST)
1769 		dev_err(hdev->dev, "Device status %s, expected status: %s\n",
1770 				hl_dynamic_fw_status_str[comm_status],
1771 				hl_dynamic_fw_status_str[expected_status]);
1772 	else
1773 		dev_err(hdev->dev, "Device status unknown %d, expected status: %s\n",
1774 				comm_status,
1775 				hl_dynamic_fw_status_str[expected_status]);
1776 }
1777 
1778 /**
1779  * hl_fw_dynamic_send_cmd - send LKD to FW cmd
1780  *
1781  * @hdev: pointer to the habanalabs device structure
1782  * @fw_loader: managing structure for loading device's FW
1783  * @cmd: LKD to FW cmd code
1784  * @size: size of next FW component to be loaded (0 if not necessary)
1785  *
1786  * LDK to FW exact command layout is defined at struct comms_command.
1787  * note: the size argument is used only when the next FW component should be
1788  *       loaded, otherwise it shall be 0. the size is used by the FW in later
1789  *       protocol stages and when sending only indicating the amount of memory
1790  *       to be allocated by the FW to receive the next boot component.
1791  */
1792 static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
1793 				struct fw_load_mgr *fw_loader,
1794 				enum comms_cmd cmd, unsigned int size)
1795 {
1796 	struct cpu_dyn_regs *dyn_regs;
1797 	u32 val;
1798 
1799 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
1800 
1801 	val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
1802 	val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
1803 
1804 	trace_habanalabs_comms_send_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
1805 	WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
1806 }
1807 
1808 /**
1809  * hl_fw_dynamic_extract_fw_response - update the FW response
1810  *
1811  * @hdev: pointer to the habanalabs device structure
1812  * @fw_loader: managing structure for loading device's FW
1813  * @response: FW response
1814  * @status: the status read from CPU status register
1815  *
1816  * @return 0 on success, otherwise non-zero error code
1817  */
1818 static int hl_fw_dynamic_extract_fw_response(struct hl_device *hdev,
1819 						struct fw_load_mgr *fw_loader,
1820 						struct fw_response *response,
1821 						u32 status)
1822 {
1823 	response->status = FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
1824 	response->ram_offset = FIELD_GET(COMMS_STATUS_OFFSET_MASK, status) <<
1825 						COMMS_STATUS_OFFSET_ALIGN_SHIFT;
1826 	response->ram_type = FIELD_GET(COMMS_STATUS_RAM_TYPE_MASK, status);
1827 
1828 	if ((response->ram_type != COMMS_SRAM) &&
1829 					(response->ram_type != COMMS_DRAM)) {
1830 		dev_err(hdev->dev, "FW status: invalid RAM type %u\n",
1831 							response->ram_type);
1832 		return -EIO;
1833 	}
1834 
1835 	return 0;
1836 }
1837 
1838 /**
1839  * hl_fw_dynamic_wait_for_status - wait for status in dynamic FW load
1840  *
1841  * @hdev: pointer to the habanalabs device structure
1842  * @fw_loader: managing structure for loading device's FW
1843  * @expected_status: expected status to wait for
1844  * @timeout: timeout for status wait
1845  *
1846  * @return 0 on success, otherwise non-zero error code
1847  *
1848  * waiting for status from FW include polling the FW status register until
1849  * expected status is received or timeout occurs (whatever occurs first).
1850  */
1851 static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
1852 						struct fw_load_mgr *fw_loader,
1853 						enum comms_sts expected_status,
1854 						u32 timeout)
1855 {
1856 	struct cpu_dyn_regs *dyn_regs;
1857 	u32 status;
1858 	int rc;
1859 
1860 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
1861 
1862 	trace_habanalabs_comms_wait_status(hdev->dev, comms_sts_str_arr[expected_status]);
1863 
1864 	/* Wait for expected status */
1865 	rc = hl_poll_timeout(
1866 		hdev,
1867 		le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
1868 		status,
1869 		FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
1870 		hdev->fw_comms_poll_interval_usec,
1871 		timeout);
1872 
1873 	if (rc) {
1874 		hl_fw_dynamic_report_error_status(hdev, status,
1875 							expected_status);
1876 		return -EIO;
1877 	}
1878 
1879 	trace_habanalabs_comms_wait_status_done(hdev->dev, comms_sts_str_arr[expected_status]);
1880 
1881 	/*
1882 	 * skip storing FW response for NOOP to preserve the actual desired
1883 	 * FW status
1884 	 */
1885 	if (expected_status == COMMS_STS_NOOP)
1886 		return 0;
1887 
1888 	rc = hl_fw_dynamic_extract_fw_response(hdev, fw_loader,
1889 					&fw_loader->dynamic_loader.response,
1890 					status);
1891 	return rc;
1892 }
1893 
1894 /**
1895  * hl_fw_dynamic_send_clear_cmd - send clear command to FW
1896  *
1897  * @hdev: pointer to the habanalabs device structure
1898  * @fw_loader: managing structure for loading device's FW
1899  *
1900  * @return 0 on success, otherwise non-zero error code
1901  *
1902  * after command cycle between LKD to FW CPU (i.e. LKD got an expected status
1903  * from FW) we need to clear the CPU status register in order to avoid garbage
1904  * between command cycles.
1905  * This is done by sending clear command and polling the CPU to LKD status
1906  * register to hold the status NOOP
1907  */
1908 static int hl_fw_dynamic_send_clear_cmd(struct hl_device *hdev,
1909 						struct fw_load_mgr *fw_loader)
1910 {
1911 	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_CLR_STS, 0);
1912 
1913 	return hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_NOOP,
1914 							fw_loader->cpu_timeout);
1915 }
1916 
1917 /**
1918  * hl_fw_dynamic_send_protocol_cmd - send LKD to FW cmd and wait for ACK
1919  *
1920  * @hdev: pointer to the habanalabs device structure
1921  * @fw_loader: managing structure for loading device's FW
1922  * @cmd: LKD to FW cmd code
1923  * @size: size of next FW component to be loaded (0 if not necessary)
1924  * @wait_ok: if true also wait for OK response from FW
1925  * @timeout: timeout for status wait
1926  *
1927  * @return 0 on success, otherwise non-zero error code
1928  *
1929  * brief:
1930  * when sending protocol command we have the following steps:
1931  * - send clear (clear command and verify clear status register)
1932  * - send the actual protocol command
1933  * - wait for ACK on the protocol command
1934  * - send clear
1935  * - send NOOP
1936  * if, in addition, the specific protocol command should wait for OK then:
1937  * - wait for OK
1938  * - send clear
1939  * - send NOOP
1940  *
1941  * NOTES:
1942  * send clear: this is necessary in order to clear the status register to avoid
1943  *             leftovers between command
1944  * NOOP command: necessary to avoid loop on the clear command by the FW
1945  */
int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
				struct fw_load_mgr *fw_loader,
				enum comms_cmd cmd, unsigned int size,
				bool wait_ok, u32 timeout)
{
	int rc;

	trace_habanalabs_comms_protocol_cmd(hdev->dev, comms_cmd_str_arr[cmd]);

	/* first send clear command to clean former commands */
	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
	if (rc)
		return rc;

	/* send the actual command */
	hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size);

	/* wait for ACK for the command */
	rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_ACK,
								timeout);
	if (rc)
		return rc;

	/*
	 * clear command to prepare for NOOP command - the clear leaves no
	 * stale status for the next exchange
	 */
	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
	if (rc)
		return rc;

	/* send the actual NOOP command - prevents FW from looping on clear */
	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);

	/* commands that complete on ACK alone are done at this point */
	if (!wait_ok)
		return 0;

	rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_OK,
								timeout);
	if (rc)
		return rc;

	/* clear command to prepare for NOOP command */
	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
	if (rc)
		return rc;

	/* send the actual NOOP command */
	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);

	return 0;
}
1995 
1996 /**
1997  * hl_fw_compat_crc32 - CRC compatible with FW
1998  *
1999  * @data: pointer to the data
2000  * @size: size of the data
2001  *
2002  * @return the CRC32 result
2003  *
2004  * NOTE: kernel's CRC32 differs from standard CRC32 calculation.
2005  *       in order to be aligned we need to flip the bits of both the input
2006  *       initial CRC and kernel's CRC32 result.
2007  *       in addition both sides use initial CRC of 0,
2008  */
2009 static u32 hl_fw_compat_crc32(u8 *data, size_t size)
2010 {
2011 	return ~crc32_le(~((u32)0), data, size);
2012 }
2013 
2014 /**
2015  * hl_fw_dynamic_validate_memory_bound - validate memory bounds for memory
2016  *                                        transfer (image or descriptor) between
2017  *                                        host and FW
2018  *
2019  * @hdev: pointer to the habanalabs device structure
2020  * @addr: device address of memory transfer
2021  * @size: memory transfer size
2022  * @region: PCI memory region
2023  *
2024  * @return 0 on success, otherwise non-zero error code
2025  */
2026 static int hl_fw_dynamic_validate_memory_bound(struct hl_device *hdev,
2027 						u64 addr, size_t size,
2028 						struct pci_mem_region *region)
2029 {
2030 	u64 end_addr;
2031 
2032 	/* now make sure that the memory transfer is within region's bounds */
2033 	end_addr = addr + size;
2034 	if (end_addr >= region->region_base + region->region_size) {
2035 		dev_err(hdev->dev,
2036 			"dynamic FW load: memory transfer end address out of memory region bounds. addr: %llx\n",
2037 							end_addr);
2038 		return -EIO;
2039 	}
2040 
2041 	/*
2042 	 * now make sure memory transfer is within predefined BAR bounds.
2043 	 * this is to make sure we do not need to set the bar (e.g. for DRAM
2044 	 * memory transfers)
2045 	 */
2046 	if (end_addr >= region->region_base - region->offset_in_bar +
2047 							region->bar_size) {
2048 		dev_err(hdev->dev,
2049 			"FW image beyond PCI BAR bounds\n");
2050 		return -EIO;
2051 	}
2052 
2053 	return 0;
2054 }
2055 
2056 /**
2057  * hl_fw_dynamic_validate_descriptor - validate FW descriptor
2058  *
2059  * @hdev: pointer to the habanalabs device structure
2060  * @fw_loader: managing structure for loading device's FW
2061  * @fw_desc: the descriptor from FW
2062  *
2063  * @return 0 on success, otherwise non-zero error code
2064  */
2065 static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
2066 					struct fw_load_mgr *fw_loader,
2067 					struct lkd_fw_comms_desc *fw_desc)
2068 {
2069 	struct pci_mem_region *region;
2070 	enum pci_region region_id;
2071 	size_t data_size;
2072 	u32 data_crc32;
2073 	u8 *data_ptr;
2074 	u64 addr;
2075 	int rc;
2076 
2077 	if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC)
2078 		dev_dbg(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
2079 				fw_desc->header.magic);
2080 
2081 	if (fw_desc->header.version != HL_COMMS_DESC_VER)
2082 		dev_dbg(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
2083 				fw_desc->header.version);
2084 
2085 	/*
2086 	 * Calc CRC32 of data without header. use the size of the descriptor
2087 	 * reported by firmware, without calculating it ourself, to allow adding
2088 	 * more fields to the lkd_fw_comms_desc structure.
2089 	 * note that no alignment/stride address issues here as all structures
2090 	 * are 64 bit padded.
2091 	 */
2092 	data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
2093 	data_size = le16_to_cpu(fw_desc->header.size);
2094 
2095 	data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
2096 	if (data_crc32 != le32_to_cpu(fw_desc->header.crc32)) {
2097 		dev_err(hdev->dev, "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
2098 			data_crc32, fw_desc->header.crc32);
2099 		return -EIO;
2100 	}
2101 
2102 	/* find memory region to which to copy the image */
2103 	addr = le64_to_cpu(fw_desc->img_addr);
2104 	region_id = hl_get_pci_memory_region(hdev, addr);
2105 	if ((region_id != PCI_REGION_SRAM) && ((region_id != PCI_REGION_DRAM))) {
2106 		dev_err(hdev->dev, "Invalid region to copy FW image address=%llx\n", addr);
2107 		return -EIO;
2108 	}
2109 
2110 	region = &hdev->pci_mem_region[region_id];
2111 
2112 	/* store the region for the copy stage */
2113 	fw_loader->dynamic_loader.image_region = region;
2114 
2115 	/*
2116 	 * here we know that the start address is valid, now make sure that the
2117 	 * image is within region's bounds
2118 	 */
2119 	rc = hl_fw_dynamic_validate_memory_bound(hdev, addr,
2120 					fw_loader->dynamic_loader.fw_image_size,
2121 					region);
2122 	if (rc) {
2123 		dev_err(hdev->dev, "invalid mem transfer request for FW image\n");
2124 		return rc;
2125 	}
2126 
2127 	/* here we can mark the descriptor as valid as the content has been validated */
2128 	fw_loader->dynamic_loader.fw_desc_valid = true;
2129 
2130 	return 0;
2131 }
2132 
2133 static int hl_fw_dynamic_validate_response(struct hl_device *hdev,
2134 						struct fw_response *response,
2135 						struct pci_mem_region *region)
2136 {
2137 	u64 device_addr;
2138 	int rc;
2139 
2140 	device_addr = region->region_base + response->ram_offset;
2141 
2142 	/*
2143 	 * validate that the descriptor is within region's bounds
2144 	 * Note that as the start address was supplied according to the RAM
2145 	 * type- testing only the end address is enough
2146 	 */
2147 	rc = hl_fw_dynamic_validate_memory_bound(hdev, device_addr,
2148 					sizeof(struct lkd_fw_comms_desc),
2149 					region);
2150 	return rc;
2151 }
2152 
2153 /*
2154  * hl_fw_dynamic_read_descriptor_msg - read and show the ascii msg that sent by fw
2155  *
2156  * @hdev: pointer to the habanalabs device structure
2157  * @fw_desc: the descriptor from FW
2158  */
2159 static void hl_fw_dynamic_read_descriptor_msg(struct hl_device *hdev,
2160 					struct lkd_fw_comms_desc *fw_desc)
2161 {
2162 	int i;
2163 	char *msg;
2164 
2165 	for (i = 0 ; i < LKD_FW_ASCII_MSG_MAX ; i++) {
2166 		if (!fw_desc->ascii_msg[i].valid)
2167 			return;
2168 
2169 		/* force NULL termination */
2170 		msg = fw_desc->ascii_msg[i].msg;
2171 		msg[LKD_FW_ASCII_MSG_MAX_LEN - 1] = '\0';
2172 
2173 		switch (fw_desc->ascii_msg[i].msg_lvl) {
2174 		case LKD_FW_ASCII_MSG_ERR:
2175 			dev_err(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2176 			break;
2177 		case LKD_FW_ASCII_MSG_WRN:
2178 			dev_warn(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2179 			break;
2180 		case LKD_FW_ASCII_MSG_INF:
2181 			dev_info(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2182 			break;
2183 		default:
2184 			dev_dbg(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2185 			break;
2186 		}
2187 	}
2188 }
2189 
2190 /**
2191  * hl_fw_dynamic_read_and_validate_descriptor - read and validate FW descriptor
2192  *
2193  * @hdev: pointer to the habanalabs device structure
2194  * @fw_loader: managing structure for loading device's FW
2195  *
2196  * @return 0 on success, otherwise non-zero error code
2197  */
2198 static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
2199 						struct fw_load_mgr *fw_loader)
2200 {
2201 	struct lkd_fw_comms_desc *fw_desc;
2202 	struct pci_mem_region *region;
2203 	struct fw_response *response;
2204 	void *temp_fw_desc;
2205 	void __iomem *src;
2206 	u16 fw_data_size;
2207 	enum pci_region region_id;
2208 	int rc;
2209 
2210 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2211 	response = &fw_loader->dynamic_loader.response;
2212 
2213 	region_id = (response->ram_type == COMMS_SRAM) ?
2214 					PCI_REGION_SRAM : PCI_REGION_DRAM;
2215 
2216 	region = &hdev->pci_mem_region[region_id];
2217 
2218 	rc = hl_fw_dynamic_validate_response(hdev, response, region);
2219 	if (rc) {
2220 		dev_err(hdev->dev,
2221 			"invalid mem transfer request for FW descriptor\n");
2222 		return rc;
2223 	}
2224 
2225 	/*
2226 	 * extract address to copy the descriptor from
2227 	 * in addition, as the descriptor value is going to be over-ridden by new data- we mark it
2228 	 * as invalid.
2229 	 * it will be marked again as valid once validated
2230 	 */
2231 	fw_loader->dynamic_loader.fw_desc_valid = false;
2232 	src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2233 							response->ram_offset;
2234 
2235 	/*
2236 	 * We do the copy of the fw descriptor in 2 phases:
2237 	 * 1. copy the header + data info according to our lkd_fw_comms_desc definition.
2238 	 *    then we're able to read the actual data size provided by fw.
2239 	 *    this is needed for cases where data in descriptor was changed(add/remove)
2240 	 *    in embedded specs header file before updating lkd copy of the header file
2241 	 * 2. copy descriptor to temporary buffer with aligned size and send it to validation
2242 	 */
2243 	memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
2244 	fw_data_size = le16_to_cpu(fw_desc->header.size);
2245 
2246 	temp_fw_desc = vzalloc(sizeof(struct comms_desc_header) + fw_data_size);
2247 	if (!temp_fw_desc)
2248 		return -ENOMEM;
2249 
2250 	memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_desc_header) + fw_data_size);
2251 
2252 	rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
2253 					(struct lkd_fw_comms_desc *) temp_fw_desc);
2254 
2255 	if (!rc)
2256 		hl_fw_dynamic_read_descriptor_msg(hdev, temp_fw_desc);
2257 
2258 	vfree(temp_fw_desc);
2259 
2260 	return rc;
2261 }
2262 
2263 /**
2264  * hl_fw_dynamic_request_descriptor - handshake with CPU to get FW descriptor
2265  *
2266  * @hdev: pointer to the habanalabs device structure
2267  * @fw_loader: managing structure for loading device's FW
2268  * @next_image_size: size to allocate for next FW component
2269  *
2270  * @return 0 on success, otherwise non-zero error code
2271  */
2272 static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev,
2273 						struct fw_load_mgr *fw_loader,
2274 						size_t next_image_size)
2275 {
2276 	int rc;
2277 
2278 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_PREP_DESC,
2279 						next_image_size, true,
2280 						fw_loader->cpu_timeout);
2281 	if (rc)
2282 		return rc;
2283 
2284 	return hl_fw_dynamic_read_and_validate_descriptor(hdev, fw_loader);
2285 }
2286 
2287 /**
2288  * hl_fw_dynamic_read_device_fw_version - read FW version to exposed properties
2289  *
2290  * @hdev: pointer to the habanalabs device structure
2291  * @fwc: the firmware component
2292  * @fw_version: fw component's version string
2293  */
2294 static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
2295 					enum hl_fw_component fwc,
2296 					const char *fw_version)
2297 {
2298 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2299 	char *preboot_ver, *boot_ver;
2300 	char btl_ver[32];
2301 	int rc;
2302 
2303 	switch (fwc) {
2304 	case FW_COMP_BOOT_FIT:
2305 		strscpy(prop->uboot_ver, fw_version, VERSION_MAX_LEN);
2306 		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
2307 		if (boot_ver) {
2308 			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
2309 			kfree(boot_ver);
2310 		}
2311 
2312 		break;
2313 	case FW_COMP_PREBOOT:
2314 		strscpy(prop->preboot_ver, fw_version, VERSION_MAX_LEN);
2315 		preboot_ver = strnstr(prop->preboot_ver, "Preboot", VERSION_MAX_LEN);
2316 		dev_info(hdev->dev, "preboot full version: '%s'\n", preboot_ver);
2317 
2318 		if (preboot_ver && preboot_ver != prop->preboot_ver) {
2319 			strscpy(btl_ver, prop->preboot_ver,
2320 				min((int) (preboot_ver - prop->preboot_ver), 31));
2321 			dev_info(hdev->dev, "%s\n", btl_ver);
2322 		}
2323 
2324 		rc = hl_get_sw_major_minor_subminor(hdev, preboot_ver);
2325 		if (rc)
2326 			return rc;
2327 		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
2328 		if (preboot_ver) {
2329 			rc = hl_get_preboot_major_minor(hdev, preboot_ver);
2330 			kfree(preboot_ver);
2331 			if (rc)
2332 				return rc;
2333 		}
2334 
2335 		break;
2336 	default:
2337 		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2338 		return -EINVAL;
2339 	}
2340 
2341 	return 0;
2342 }
2343 
2344 /**
2345  * hl_fw_dynamic_copy_image - copy image to memory allocated by the FW
2346  *
2347  * @hdev: pointer to the habanalabs device structure
2348  * @fw: fw descriptor
2349  * @fw_loader: managing structure for loading device's FW
2350  */
2351 static int hl_fw_dynamic_copy_image(struct hl_device *hdev,
2352 						const struct firmware *fw,
2353 						struct fw_load_mgr *fw_loader)
2354 {
2355 	struct lkd_fw_comms_desc *fw_desc;
2356 	struct pci_mem_region *region;
2357 	void __iomem *dest;
2358 	u64 addr;
2359 	int rc;
2360 
2361 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2362 	addr = le64_to_cpu(fw_desc->img_addr);
2363 
2364 	/* find memory region to which to copy the image */
2365 	region = fw_loader->dynamic_loader.image_region;
2366 
2367 	dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2368 					(addr - region->region_base);
2369 
2370 	rc = hl_fw_copy_fw_to_device(hdev, fw, dest,
2371 					fw_loader->boot_fit_img.src_off,
2372 					fw_loader->boot_fit_img.copy_size);
2373 
2374 	return rc;
2375 }
2376 
2377 /**
2378  * hl_fw_dynamic_copy_msg - copy msg to memory allocated by the FW
2379  *
2380  * @hdev: pointer to the habanalabs device structure
2381  * @msg: message
2382  * @fw_loader: managing structure for loading device's FW
2383  */
2384 static int hl_fw_dynamic_copy_msg(struct hl_device *hdev,
2385 		struct lkd_msg_comms *msg, struct fw_load_mgr *fw_loader)
2386 {
2387 	struct lkd_fw_comms_desc *fw_desc;
2388 	struct pci_mem_region *region;
2389 	void __iomem *dest;
2390 	u64 addr;
2391 	int rc;
2392 
2393 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2394 	addr = le64_to_cpu(fw_desc->img_addr);
2395 
2396 	/* find memory region to which to copy the image */
2397 	region = fw_loader->dynamic_loader.image_region;
2398 
2399 	dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2400 					(addr - region->region_base);
2401 
2402 	rc = hl_fw_copy_msg_to_device(hdev, msg, dest, 0, 0);
2403 
2404 	return rc;
2405 }
2406 
2407 /**
2408  * hl_fw_boot_fit_update_state - update internal data structures after boot-fit
2409  *                               is loaded
2410  *
2411  * @hdev: pointer to the habanalabs device structure
2412  * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
2413  * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
2414  *
2415  * @return 0 on success, otherwise non-zero error code
2416  */
2417 static void hl_fw_boot_fit_update_state(struct hl_device *hdev,
2418 						u32 cpu_boot_dev_sts0_reg,
2419 						u32 cpu_boot_dev_sts1_reg)
2420 {
2421 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2422 
2423 	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_BOOT_CPU;
2424 
2425 	/* Read boot_cpu status bits */
2426 	if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) {
2427 		prop->fw_bootfit_cpu_boot_dev_sts0 =
2428 				RREG32(cpu_boot_dev_sts0_reg);
2429 
2430 		prop->hard_reset_done_by_fw = !!(prop->fw_bootfit_cpu_boot_dev_sts0 &
2431 							CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
2432 
2433 		dev_dbg(hdev->dev, "Firmware boot CPU status0 %#x\n",
2434 					prop->fw_bootfit_cpu_boot_dev_sts0);
2435 	}
2436 
2437 	if (prop->fw_cpu_boot_dev_sts1_valid) {
2438 		prop->fw_bootfit_cpu_boot_dev_sts1 =
2439 				RREG32(cpu_boot_dev_sts1_reg);
2440 
2441 		dev_dbg(hdev->dev, "Firmware boot CPU status1 %#x\n",
2442 					prop->fw_bootfit_cpu_boot_dev_sts1);
2443 	}
2444 
2445 	dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
2446 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
2447 }
2448 
2449 static void hl_fw_dynamic_update_linux_interrupt_if(struct hl_device *hdev)
2450 {
2451 	struct cpu_dyn_regs *dyn_regs =
2452 			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
2453 
2454 	/* Check whether all 3 interrupt interfaces are set, if not use a
2455 	 * single interface
2456 	 */
2457 	if (!hdev->asic_prop.gic_interrupts_enable &&
2458 			!(hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
2459 				CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN)) {
2460 		dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_pi_upd_irq;
2461 		dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_pi_upd_irq;
2462 
2463 		dev_warn(hdev->dev,
2464 			"Using a single interrupt interface towards cpucp");
2465 	}
2466 }
2467 /**
2468  * hl_fw_dynamic_load_image - load FW image using dynamic protocol
2469  *
2470  * @hdev: pointer to the habanalabs device structure
2471  * @fw_loader: managing structure for loading device's FW
2472  * @load_fwc: the FW component to be loaded
2473  * @img_ld_timeout: image load timeout
2474  *
2475  * @return 0 on success, otherwise non-zero error code
2476  */
2477 static int hl_fw_dynamic_load_image(struct hl_device *hdev,
2478 						struct fw_load_mgr *fw_loader,
2479 						enum hl_fw_component load_fwc,
2480 						u32 img_ld_timeout)
2481 {
2482 	enum hl_fw_component cur_fwc;
2483 	const struct firmware *fw;
2484 	char *fw_name;
2485 	int rc = 0;
2486 
2487 	/*
2488 	 * when loading image we have one of 2 scenarios:
2489 	 * 1. current FW component is preboot and we want to load boot-fit
2490 	 * 2. current FW component is boot-fit and we want to load linux
2491 	 */
2492 	if (load_fwc == FW_COMP_BOOT_FIT) {
2493 		cur_fwc = FW_COMP_PREBOOT;
2494 		fw_name = fw_loader->boot_fit_img.image_name;
2495 	} else {
2496 		cur_fwc = FW_COMP_BOOT_FIT;
2497 		fw_name = fw_loader->linux_img.image_name;
2498 	}
2499 
2500 	/* request FW in order to communicate to FW the size to be allocated */
2501 	rc = hl_request_fw(hdev, &fw, fw_name);
2502 	if (rc)
2503 		return rc;
2504 
2505 	/* store the image size for future validation */
2506 	fw_loader->dynamic_loader.fw_image_size = fw->size;
2507 
2508 	rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, fw->size);
2509 	if (rc)
2510 		goto release_fw;
2511 
2512 	/* read preboot version */
2513 	rc = hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
2514 				fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
2515 	if (rc)
2516 		goto release_fw;
2517 
2518 	/* copy boot fit to space allocated by FW */
2519 	rc = hl_fw_dynamic_copy_image(hdev, fw, fw_loader);
2520 	if (rc)
2521 		goto release_fw;
2522 
2523 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
2524 						0, true,
2525 						fw_loader->cpu_timeout);
2526 	if (rc)
2527 		goto release_fw;
2528 
2529 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
2530 						0, false,
2531 						img_ld_timeout);
2532 
2533 release_fw:
2534 	hl_release_firmware(fw);
2535 	return rc;
2536 }
2537 
/*
 * hl_fw_dynamic_wait_for_boot_fit_active - poll until the boot-fit reports a
 * running boot-loader status (or SRAM_AVAIL in the uboot-only debug scenario).
 * Returns 0 on success, -ETIMEDOUT-style error from hl_poll_timeout otherwise.
 */
static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
					struct fw_load_mgr *fw_loader)
{
	struct dynamic_fw_load_mgr *dyn_loader;
	u32 status;
	int rc;

	dyn_loader = &fw_loader->dynamic_loader;

	/*
	 * Make sure CPU boot-loader is running
	 * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux
	 * yet there is a debug scenario in which we loading uboot (without Linux)
	 * which at later stage is relocated to DRAM. In this case we expect
	 * uboot to set the CPU_BOOT_STATUS_SRAM_AVAIL and so we add it to the
	 * poll flags
	 */
	rc = hl_poll_timeout(
		hdev,
		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
		status,
		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		hdev->fw_poll_interval_usec,
		dyn_loader->wait_for_bl_timeout);
	if (rc) {
		dev_err(hdev->dev, "failed to wait for boot (status = %d)\n", status);
		return rc;
	}

	dev_dbg(hdev->dev, "uboot status = %d\n", status);
	return 0;
}
2571 
/*
 * hl_fw_dynamic_wait_for_linux_active - poll the dynamic boot-status register
 * until Linux reports SRAM_AVAIL. Returns 0 on success, error from
 * hl_poll_timeout otherwise.
 */
static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
						struct fw_load_mgr *fw_loader)
{
	struct dynamic_fw_load_mgr *dyn_loader;
	u32 status;
	int rc;

	dyn_loader = &fw_loader->dynamic_loader;

	/* Make sure CPU linux is running */

	rc = hl_poll_timeout(
		hdev,
		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
		status,
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		hdev->fw_poll_interval_usec,
		fw_loader->cpu_timeout);
	if (rc) {
		dev_err(hdev->dev, "failed to wait for Linux (status = %d)\n", status);
		return rc;
	}

	dev_dbg(hdev->dev, "Boot status = %d\n", status);
	return 0;
}
2598 
2599 /**
2600  * hl_fw_linux_update_state -	update internal data structures after Linux
2601  *				is loaded.
2602  *				Note: Linux initialization is comprised mainly
2603  *				of two stages - loading kernel (SRAM_AVAIL)
2604  *				& loading ARMCP.
2605  *				Therefore reading boot device status in any of
2606  *				these stages might result in different values.
2607  *
2608  * @hdev: pointer to the habanalabs device structure
2609  * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
2610  * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
2611  *
2612  * @return 0 on success, otherwise non-zero error code
2613  */
2614 static void hl_fw_linux_update_state(struct hl_device *hdev,
2615 						u32 cpu_boot_dev_sts0_reg,
2616 						u32 cpu_boot_dev_sts1_reg)
2617 {
2618 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2619 
2620 	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_LINUX;
2621 
2622 	/* Read FW application security bits */
2623 	if (prop->fw_cpu_boot_dev_sts0_valid) {
2624 		prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);
2625 
2626 		prop->hard_reset_done_by_fw = !!(prop->fw_app_cpu_boot_dev_sts0 &
2627 							CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
2628 
2629 		if (prop->fw_app_cpu_boot_dev_sts0 &
2630 				CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN)
2631 			prop->gic_interrupts_enable = false;
2632 
2633 		dev_dbg(hdev->dev,
2634 			"Firmware application CPU status0 %#x\n",
2635 			prop->fw_app_cpu_boot_dev_sts0);
2636 
2637 		dev_dbg(hdev->dev, "GIC controller is %s\n",
2638 				prop->gic_interrupts_enable ?
2639 						"enabled" : "disabled");
2640 	}
2641 
2642 	if (prop->fw_cpu_boot_dev_sts1_valid) {
2643 		prop->fw_app_cpu_boot_dev_sts1 = RREG32(cpu_boot_dev_sts1_reg);
2644 
2645 		dev_dbg(hdev->dev,
2646 			"Firmware application CPU status1 %#x\n",
2647 			prop->fw_app_cpu_boot_dev_sts1);
2648 	}
2649 
2650 	dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
2651 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
2652 
2653 	dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2654 }
2655 
2656 /**
2657  * hl_fw_dynamic_send_msg - send a COMMS message with attached data
2658  *
2659  * @hdev: pointer to the habanalabs device structure
2660  * @fw_loader: managing structure for loading device's FW
2661  * @msg_type: message type
2662  * @data: data to be sent
2663  *
2664  * @return 0 on success, otherwise non-zero error code
2665  */
2666 static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
2667 		struct fw_load_mgr *fw_loader, u8 msg_type, void *data)
2668 {
2669 	struct lkd_msg_comms *msg;
2670 	int rc;
2671 
2672 	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
2673 	if (!msg)
2674 		return -ENOMEM;
2675 
2676 	/* create message to be sent */
2677 	msg->header.type = msg_type;
2678 	msg->header.size = cpu_to_le16(sizeof(struct comms_msg_header));
2679 	msg->header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
2680 
2681 	switch (msg_type) {
2682 	case HL_COMMS_RESET_CAUSE_TYPE:
2683 		msg->reset_cause = *(__u8 *) data;
2684 		break;
2685 
2686 	default:
2687 		dev_err(hdev->dev,
2688 			"Send COMMS message - invalid message type %u\n",
2689 			msg_type);
2690 		rc = -EINVAL;
2691 		goto out;
2692 	}
2693 
2694 	rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
2695 			sizeof(struct lkd_msg_comms));
2696 	if (rc)
2697 		goto out;
2698 
2699 	/* copy message to space allocated by FW */
2700 	rc = hl_fw_dynamic_copy_msg(hdev, msg, fw_loader);
2701 	if (rc)
2702 		goto out;
2703 
2704 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
2705 						0, true,
2706 						fw_loader->cpu_timeout);
2707 	if (rc)
2708 		goto out;
2709 
2710 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
2711 						0, true,
2712 						fw_loader->cpu_timeout);
2713 
2714 out:
2715 	kfree(msg);
2716 	return rc;
2717 }
2718 
2719 /**
2720  * hl_fw_dynamic_init_cpu - initialize the device CPU using dynamic protocol
2721  *
2722  * @hdev: pointer to the habanalabs device structure
2723  * @fw_loader: managing structure for loading device's FW
2724  *
2725  * @return 0 on success, otherwise non-zero error code
2726  *
2727  * brief: the dynamic protocol is master (LKD) slave (FW CPU) protocol.
2728  * the communication is done using registers:
2729  * - LKD command register
2730  * - FW status register
2731  * the protocol is race free. this goal is achieved by splitting the requests
2732  * and response to known synchronization points between the LKD and the FW.
2733  * each response to LKD request is known and bound to a predefined timeout.
2734  * in case of timeout expiration without the desired status from FW- the
2735  * protocol (and hence the boot) will fail.
2736  */
2737 static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
2738 					struct fw_load_mgr *fw_loader)
2739 {
2740 	struct cpu_dyn_regs *dyn_regs;
2741 	int rc, fw_error_rc;
2742 
2743 	dev_info(hdev->dev,
2744 		"Loading %sfirmware to device, may take some time...\n",
2745 		hdev->asic_prop.fw_security_enabled ? "secured " : "");
2746 
2747 	/* initialize FW descriptor as invalid */
2748 	fw_loader->dynamic_loader.fw_desc_valid = false;
2749 
2750 	/*
2751 	 * In this stage, "cpu_dyn_regs" contains only LKD's hard coded values!
2752 	 * It will be updated from FW after hl_fw_dynamic_request_descriptor().
2753 	 */
2754 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
2755 
2756 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
2757 						0, true,
2758 						fw_loader->cpu_timeout);
2759 	if (rc)
2760 		goto protocol_err;
2761 
2762 	if (hdev->reset_info.curr_reset_cause) {
2763 		rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
2764 				HL_COMMS_RESET_CAUSE_TYPE, &hdev->reset_info.curr_reset_cause);
2765 		if (rc)
2766 			goto protocol_err;
2767 
2768 		/* Clear current reset cause */
2769 		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
2770 	}
2771 
2772 	if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
2773 		struct lkd_fw_binning_info *binning_info;
2774 
2775 		rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
2776 							sizeof(struct lkd_msg_comms));
2777 		if (rc)
2778 			goto protocol_err;
2779 
2780 		/* read preboot version */
2781 		rc = hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
2782 				fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
2783 
2784 		if (rc)
2785 			return rc;
2786 
2787 		/* read binning info from preboot */
2788 		if (hdev->support_preboot_binning) {
2789 			binning_info = &fw_loader->dynamic_loader.comm_desc.binning_info;
2790 			hdev->tpc_binning = le64_to_cpu(binning_info->tpc_mask_l);
2791 			hdev->dram_binning = le32_to_cpu(binning_info->dram_mask);
2792 			hdev->edma_binning = le32_to_cpu(binning_info->edma_mask);
2793 			hdev->decoder_binning = le32_to_cpu(binning_info->dec_mask);
2794 			hdev->rotator_binning = le32_to_cpu(binning_info->rot_mask);
2795 
2796 			rc = hdev->asic_funcs->set_dram_properties(hdev);
2797 			if (rc)
2798 				return rc;
2799 
2800 			rc = hdev->asic_funcs->set_binning_masks(hdev);
2801 			if (rc)
2802 				return rc;
2803 
2804 			dev_dbg(hdev->dev,
2805 				"Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x, rot:0x%x\n",
2806 				hdev->tpc_binning, hdev->dram_binning, hdev->edma_binning,
2807 				hdev->decoder_binning, hdev->rotator_binning);
2808 		}
2809 
2810 		if (hdev->asic_prop.support_dynamic_resereved_fw_size) {
2811 			hdev->asic_prop.reserved_fw_mem_size =
2812 				le32_to_cpu(fw_loader->dynamic_loader.comm_desc.rsvd_mem_size_mb);
2813 		}
2814 
2815 		return 0;
2816 	}
2817 
2818 	/* load boot fit to FW */
2819 	rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_BOOT_FIT,
2820 						fw_loader->boot_fit_timeout);
2821 	if (rc) {
2822 		dev_err(hdev->dev, "failed to load boot fit\n");
2823 		goto protocol_err;
2824 	}
2825 
2826 	rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
2827 	if (rc)
2828 		goto protocol_err;
2829 
2830 	hl_fw_boot_fit_update_state(hdev,
2831 			le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2832 			le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2833 
2834 	/*
2835 	 * when testing FW load (without Linux) on PLDM we don't want to
2836 	 * wait until boot fit is active as it may take several hours.
2837 	 * instead, we load the bootfit and let it do all initialization in
2838 	 * the background.
2839 	 */
2840 	if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
2841 		return 0;
2842 
2843 	/* Enable DRAM scrambling before Linux boot and after successful
2844 	 *  UBoot
2845 	 */
2846 	hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
2847 
2848 	if (!(hdev->fw_components & FW_TYPE_LINUX)) {
2849 		dev_info(hdev->dev, "Skip loading Linux F/W\n");
2850 		return 0;
2851 	}
2852 
2853 	if (fw_loader->skip_bmc) {
2854 		rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader,
2855 							COMMS_SKIP_BMC, 0,
2856 							true,
2857 							fw_loader->cpu_timeout);
2858 		if (rc) {
2859 			dev_err(hdev->dev, "failed to load boot fit\n");
2860 			goto protocol_err;
2861 		}
2862 	}
2863 
2864 	/* load Linux image to FW */
2865 	rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_LINUX,
2866 							fw_loader->cpu_timeout);
2867 	if (rc) {
2868 		dev_err(hdev->dev, "failed to load Linux\n");
2869 		goto protocol_err;
2870 	}
2871 
2872 	rc = hl_fw_dynamic_wait_for_linux_active(hdev, fw_loader);
2873 	if (rc)
2874 		goto protocol_err;
2875 
2876 	hl_fw_linux_update_state(hdev,
2877 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2878 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2879 
2880 	hl_fw_dynamic_update_linux_interrupt_if(hdev);
2881 
2882 protocol_err:
2883 	if (fw_loader->dynamic_loader.fw_desc_valid) {
2884 		fw_error_rc = fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
2885 				le32_to_cpu(dyn_regs->cpu_boot_err1),
2886 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2887 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2888 
2889 		if (fw_error_rc)
2890 			return fw_error_rc;
2891 	}
2892 
2893 	return rc;
2894 }
2895 
2896 /**
2897  * hl_fw_static_init_cpu - initialize the device CPU using static protocol
2898  *
2899  * @hdev: pointer to the habanalabs device structure
2900  * @fw_loader: managing structure for loading device's FW
2901  *
2902  * @return 0 on success, otherwise non-zero error code
2903  */
2904 static int hl_fw_static_init_cpu(struct hl_device *hdev,
2905 					struct fw_load_mgr *fw_loader)
2906 {
2907 	u32 cpu_msg_status_reg, cpu_timeout, msg_to_cpu_reg, status;
2908 	u32 cpu_boot_dev_status0_reg, cpu_boot_dev_status1_reg;
2909 	struct static_fw_load_mgr *static_loader;
2910 	u32 cpu_boot_status_reg;
2911 	int rc;
2912 
2913 	if (!(hdev->fw_components & FW_TYPE_BOOT_CPU))
2914 		return 0;
2915 
2916 	/* init common loader parameters */
2917 	cpu_timeout = fw_loader->cpu_timeout;
2918 
2919 	/* init static loader parameters */
2920 	static_loader = &fw_loader->static_loader;
2921 	cpu_msg_status_reg = static_loader->cpu_cmd_status_to_host_reg;
2922 	msg_to_cpu_reg = static_loader->kmd_msg_to_cpu_reg;
2923 	cpu_boot_dev_status0_reg = static_loader->cpu_boot_dev_status0_reg;
2924 	cpu_boot_dev_status1_reg = static_loader->cpu_boot_dev_status1_reg;
2925 	cpu_boot_status_reg = static_loader->cpu_boot_status_reg;
2926 
2927 	dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
2928 		cpu_timeout / USEC_PER_SEC);
2929 
2930 	/* Wait for boot FIT request */
2931 	rc = hl_poll_timeout(
2932 		hdev,
2933 		cpu_boot_status_reg,
2934 		status,
2935 		status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
2936 		hdev->fw_poll_interval_usec,
2937 		fw_loader->boot_fit_timeout);
2938 
2939 	if (rc) {
2940 		dev_dbg(hdev->dev,
2941 			"No boot fit request received (status = %d), resuming boot\n", status);
2942 	} else {
2943 		rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
2944 		if (rc)
2945 			goto out;
2946 
2947 		/* Clear device CPU message status */
2948 		WREG32(cpu_msg_status_reg, CPU_MSG_CLR);
2949 
2950 		/* Signal device CPU that boot loader is ready */
2951 		WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
2952 
2953 		/* Poll for CPU device ack */
2954 		rc = hl_poll_timeout(
2955 			hdev,
2956 			cpu_msg_status_reg,
2957 			status,
2958 			status == CPU_MSG_OK,
2959 			hdev->fw_poll_interval_usec,
2960 			fw_loader->boot_fit_timeout);
2961 
2962 		if (rc) {
2963 			dev_err(hdev->dev,
2964 				"Timeout waiting for boot fit load ack (status = %d)\n", status);
2965 			goto out;
2966 		}
2967 
2968 		/* Clear message */
2969 		WREG32(msg_to_cpu_reg, KMD_MSG_NA);
2970 	}
2971 
2972 	/*
2973 	 * Make sure CPU boot-loader is running
2974 	 * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux
2975 	 * yet there is a debug scenario in which we loading uboot (without Linux)
2976 	 * which at later stage is relocated to DRAM. In this case we expect
2977 	 * uboot to set the CPU_BOOT_STATUS_SRAM_AVAIL and so we add it to the
2978 	 * poll flags
2979 	 */
2980 	rc = hl_poll_timeout(
2981 		hdev,
2982 		cpu_boot_status_reg,
2983 		status,
2984 		(status == CPU_BOOT_STATUS_DRAM_RDY) ||
2985 		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
2986 		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
2987 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2988 		hdev->fw_poll_interval_usec,
2989 		cpu_timeout);
2990 
2991 	dev_dbg(hdev->dev, "uboot status = %d\n", status);
2992 
2993 	/* Read U-Boot version now in case we will later fail */
2994 	hl_fw_static_read_device_fw_version(hdev, FW_COMP_BOOT_FIT);
2995 
2996 	/* update state according to boot stage */
2997 	hl_fw_boot_fit_update_state(hdev, cpu_boot_dev_status0_reg,
2998 						cpu_boot_dev_status1_reg);
2999 
3000 	if (rc) {
3001 		detect_cpu_boot_status(hdev, status);
3002 		rc = -EIO;
3003 		goto out;
3004 	}
3005 
3006 	/* Enable DRAM scrambling before Linux boot and after successful
3007 	 *  UBoot
3008 	 */
3009 	hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
3010 
3011 	if (!(hdev->fw_components & FW_TYPE_LINUX)) {
3012 		dev_info(hdev->dev, "Skip loading Linux F/W\n");
3013 		rc = 0;
3014 		goto out;
3015 	}
3016 
3017 	if (status == CPU_BOOT_STATUS_SRAM_AVAIL) {
3018 		rc = 0;
3019 		goto out;
3020 	}
3021 
3022 	dev_info(hdev->dev,
3023 		"Loading firmware to device, may take some time...\n");
3024 
3025 	rc = hdev->asic_funcs->load_firmware_to_device(hdev);
3026 	if (rc)
3027 		goto out;
3028 
3029 	if (fw_loader->skip_bmc) {
3030 		WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);
3031 
3032 		rc = hl_poll_timeout(
3033 			hdev,
3034 			cpu_boot_status_reg,
3035 			status,
3036 			(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
3037 			hdev->fw_poll_interval_usec,
3038 			cpu_timeout);
3039 
3040 		if (rc) {
3041 			dev_err(hdev->dev,
3042 				"Failed to get ACK on skipping BMC (status = %d)\n",
3043 				status);
3044 			WREG32(msg_to_cpu_reg, KMD_MSG_NA);
3045 			rc = -EIO;
3046 			goto out;
3047 		}
3048 	}
3049 
3050 	WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
3051 
3052 	rc = hl_poll_timeout(
3053 		hdev,
3054 		cpu_boot_status_reg,
3055 		status,
3056 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
3057 		hdev->fw_poll_interval_usec,
3058 		cpu_timeout);
3059 
3060 	/* Clear message */
3061 	WREG32(msg_to_cpu_reg, KMD_MSG_NA);
3062 
3063 	if (rc) {
3064 		if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
3065 			dev_err(hdev->dev,
3066 				"Device reports FIT image is corrupted\n");
3067 		else
3068 			dev_err(hdev->dev,
3069 				"Failed to load firmware to device (status = %d)\n",
3070 				status);
3071 
3072 		rc = -EIO;
3073 		goto out;
3074 	}
3075 
3076 	rc = fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
3077 					fw_loader->static_loader.boot_err1_reg,
3078 					cpu_boot_dev_status0_reg,
3079 					cpu_boot_dev_status1_reg);
3080 	if (rc)
3081 		return rc;
3082 
3083 	hl_fw_linux_update_state(hdev, cpu_boot_dev_status0_reg,
3084 						cpu_boot_dev_status1_reg);
3085 
3086 	return 0;
3087 
3088 out:
3089 	fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
3090 					fw_loader->static_loader.boot_err1_reg,
3091 					cpu_boot_dev_status0_reg,
3092 					cpu_boot_dev_status1_reg);
3093 
3094 	return rc;
3095 }
3096 
3097 /**
3098  * hl_fw_init_cpu - initialize the device CPU
3099  *
3100  * @hdev: pointer to the habanalabs device structure
3101  *
3102  * @return 0 on success, otherwise non-zero error code
3103  *
3104  * perform necessary initializations for device's CPU. takes into account if
3105  * init protocol is static or dynamic.
3106  */
3107 int hl_fw_init_cpu(struct hl_device *hdev)
3108 {
3109 	struct asic_fixed_properties *prop = &hdev->asic_prop;
3110 	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
3111 
3112 	return  prop->dynamic_fw_load ?
3113 			hl_fw_dynamic_init_cpu(hdev, fw_loader) :
3114 			hl_fw_static_init_cpu(hdev, fw_loader);
3115 }
3116 
3117 void hl_fw_set_pll_profile(struct hl_device *hdev)
3118 {
3119 	hl_fw_set_frequency(hdev, hdev->asic_prop.clk_pll_index,
3120 				hdev->asic_prop.max_freq_value);
3121 }
3122 
3123 int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
3124 {
3125 	long value;
3126 
3127 	if (!hl_device_operational(hdev, NULL))
3128 		return -ENODEV;
3129 
3130 	if (!hdev->pdev) {
3131 		*cur_clk = 0;
3132 		*max_clk = 0;
3133 		return 0;
3134 	}
3135 
3136 	value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
3137 
3138 	if (value < 0) {
3139 		dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n", value);
3140 		return value;
3141 	}
3142 
3143 	*max_clk = (value / 1000 / 1000);
3144 
3145 	value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
3146 
3147 	if (value < 0) {
3148 		dev_err(hdev->dev, "Failed to retrieve device current clock %ld\n", value);
3149 		return value;
3150 	}
3151 
3152 	*cur_clk = (value / 1000 / 1000);
3153 
3154 	return 0;
3155 }
3156 
3157 long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
3158 {
3159 	struct cpucp_packet pkt;
3160 	u32 used_pll_idx;
3161 	u64 result;
3162 	int rc;
3163 
3164 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
3165 	if (rc)
3166 		return rc;
3167 
3168 	memset(&pkt, 0, sizeof(pkt));
3169 
3170 	if (curr)
3171 		pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_CURR_GET <<
3172 						CPUCP_PKT_CTL_OPCODE_SHIFT);
3173 	else
3174 		pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3175 
3176 	pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
3177 
3178 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
3179 
3180 	if (rc) {
3181 		dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
3182 			used_pll_idx, rc);
3183 		return rc;
3184 	}
3185 
3186 	return (long) result;
3187 }
3188 
3189 void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
3190 {
3191 	struct cpucp_packet pkt;
3192 	u32 used_pll_idx;
3193 	int rc;
3194 
3195 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
3196 	if (rc)
3197 		return;
3198 
3199 	memset(&pkt, 0, sizeof(pkt));
3200 
3201 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3202 	pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
3203 	pkt.value = cpu_to_le64(freq);
3204 
3205 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
3206 
3207 	if (rc)
3208 		dev_err(hdev->dev, "Failed to set frequency to PLL %d, error %d\n",
3209 			used_pll_idx, rc);
3210 }
3211 
3212 long hl_fw_get_max_power(struct hl_device *hdev)
3213 {
3214 	struct cpucp_packet pkt;
3215 	u64 result;
3216 	int rc;
3217 
3218 	memset(&pkt, 0, sizeof(pkt));
3219 
3220 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3221 
3222 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
3223 
3224 	if (rc) {
3225 		dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
3226 		return rc;
3227 	}
3228 
3229 	return result;
3230 }
3231 
3232 void hl_fw_set_max_power(struct hl_device *hdev)
3233 {
3234 	struct cpucp_packet pkt;
3235 	int rc;
3236 
3237 	/* TODO: remove this after simulator supports this packet */
3238 	if (!hdev->pdev)
3239 		return;
3240 
3241 	memset(&pkt, 0, sizeof(pkt));
3242 
3243 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3244 	pkt.value = cpu_to_le64(hdev->max_power);
3245 
3246 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
3247 
3248 	if (rc)
3249 		dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
3250 }
3251 
3252 static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void *data, u32 size,
3253 					u32 nonce, u32 timeout)
3254 {
3255 	struct cpucp_packet pkt = {};
3256 	dma_addr_t req_dma_addr;
3257 	void *req_cpu_addr;
3258 	int rc;
3259 
3260 	req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
3261 	if (!req_cpu_addr) {
3262 		dev_err(hdev->dev,
3263 			"Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
3264 		return -ENOMEM;
3265 	}
3266 
3267 	memset(data, 0, size);
3268 
3269 	pkt.ctl = cpu_to_le32(packet_id << CPUCP_PKT_CTL_OPCODE_SHIFT);
3270 	pkt.addr = cpu_to_le64(req_dma_addr);
3271 	pkt.data_max_size = cpu_to_le32(size);
3272 	pkt.nonce = cpu_to_le32(nonce);
3273 
3274 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
3275 					timeout, NULL);
3276 	if (rc) {
3277 		dev_err(hdev->dev,
3278 			"Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
3279 		goto out;
3280 	}
3281 
3282 	memcpy(data, req_cpu_addr, size);
3283 
3284 out:
3285 	hl_cpu_accessible_dma_pool_free(hdev, size, req_cpu_addr);
3286 
3287 	return rc;
3288 }
3289 
3290 int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
3291 				u32 nonce)
3292 {
3293 	return hl_fw_get_sec_attest_data(hdev, CPUCP_PACKET_SEC_ATTEST_GET, sec_attest_info,
3294 					sizeof(struct cpucp_sec_attest_info), nonce,
3295 					HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
3296 }
3297 
3298 int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
3299 						dma_addr_t buff, u32 *size)
3300 {
3301 	struct cpucp_packet pkt = {};
3302 	u64 result;
3303 	int rc = 0;
3304 
3305 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_GENERIC_PASSTHROUGH << CPUCP_PKT_CTL_OPCODE_SHIFT);
3306 	pkt.addr = cpu_to_le64(buff);
3307 	pkt.data_max_size = cpu_to_le32(*size);
3308 	pkt.pkt_subidx = cpu_to_le32(sub_opcode);
3309 
3310 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt),
3311 						HL_CPUCP_INFO_TIMEOUT_USEC, &result);
3312 	if (rc)
3313 		dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
3314 	else
3315 		dev_dbg(hdev->dev, "generic pkt was successful, result: 0x%llx\n", result);
3316 
3317 	*size = (u32)result;
3318 
3319 	return rc;
3320 }
3321