xref: /linux/drivers/accel/habanalabs/common/security.c (revision 1fd1dc41724319406b0aff221a352a400b0ddfc5)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2020 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #include "habanalabs.h"
9 
/* Human-readable description for each bit of a block's GLBL_ERR_CAUSE
 * register; the array index is the bit position (see hl_read_glbl_errors()).
 * "N/A" marks bits with no defined cause.
 */
static const char * const hl_glbl_error_cause[] = {
	"Error due to un-priv read",
	"Error due to un-secure read",
	"Error due to read from unmapped reg",
	"Error due to un-priv write",
	"Error due to un-secure write",
	"Error due to write to unmapped reg",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"External I/F write sec violation",
	"External I/F write to un-mapped reg",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"N/A",
	"Read to write only",
	"Write to read only"
};
38 
39 /**
40  * hl_get_pb_block - return the relevant block within the block array
41  *
42  * @hdev: pointer to hl_device structure
43  * @mm_reg_addr: register address in the desired block
44  * @pb_blocks: blocks array
45  * @array_size: blocks array size
46  *
47  */
48 static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
49 		const u32 pb_blocks[], int array_size)
50 {
51 	int i;
52 	u32 start_addr, end_addr;
53 
54 	for (i = 0 ; i < array_size ; i++) {
55 		start_addr = pb_blocks[i];
56 		end_addr = start_addr + HL_BLOCK_SIZE;
57 
58 		if ((mm_reg_addr >= start_addr) && (mm_reg_addr < end_addr))
59 			return i;
60 	}
61 
62 	dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
63 			mm_reg_addr);
64 	return -EDOM;
65 }
66 
67 /**
68  * hl_unset_pb_in_block - clear a specific protection bit in a block
69  *
70  * @hdev: pointer to hl_device structure
71  * @reg_offset: register offset will be converted to bit offset in pb block
72  * @sgs_entry: pb array
73  *
74  */
75 static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
76 				struct hl_block_glbl_sec *sgs_entry)
77 {
78 	if ((reg_offset >= HL_BLOCK_SIZE) || (reg_offset & 0x3)) {
79 		dev_err(hdev->dev,
80 			"Register offset(%d) is out of range(%d) or invalid\n",
81 			reg_offset, HL_BLOCK_SIZE);
82 		return -EINVAL;
83 	}
84 
85 	UNSET_GLBL_SEC_BIT(sgs_entry->sec_array,
86 			 (reg_offset & (HL_BLOCK_SIZE - 1)) >> 2);
87 
88 	return 0;
89 }
90 
91 /**
92  * hl_unsecure_register - locate the relevant block for this register and
93  *                        remove corresponding protection bit
94  *
95  * @hdev: pointer to hl_device structure
96  * @mm_reg_addr: register address to unsecure
97  * @offset: additional offset to the register address
98  * @pb_blocks: blocks array
99  * @sgs_array: pb array
100  * @array_size: blocks array size
101  *
102  */
103 int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
104 		const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
105 		int array_size)
106 {
107 	u32 reg_offset;
108 	int block_num;
109 
110 	block_num = hl_get_pb_block(hdev, mm_reg_addr + offset, pb_blocks,
111 			array_size);
112 	if (block_num < 0)
113 		return block_num;
114 
115 	reg_offset = (mm_reg_addr + offset) - pb_blocks[block_num];
116 
117 	return hl_unset_pb_in_block(hdev, reg_offset, &sgs_array[block_num]);
118 }
119 
120 /**
121  * hl_unsecure_register_range - locate the relevant block for this register
122  *                              range and remove corresponding protection bit
123  *
124  * @hdev: pointer to hl_device structure
125  * @mm_reg_range: register address range to unsecure
126  * @offset: additional offset to the register address
127  * @pb_blocks: blocks array
128  * @sgs_array: pb array
129  * @array_size: blocks array size
130  *
131  */
132 static int hl_unsecure_register_range(struct hl_device *hdev,
133 		struct range mm_reg_range, int offset, const u32 pb_blocks[],
134 		struct hl_block_glbl_sec sgs_array[],
135 		int array_size)
136 {
137 	u32 reg_offset;
138 	int i, block_num, rc = 0;
139 
140 	block_num = hl_get_pb_block(hdev,
141 			mm_reg_range.start + offset, pb_blocks,
142 			array_size);
143 	if (block_num < 0)
144 		return block_num;
145 
146 	for (i = mm_reg_range.start ; i <= mm_reg_range.end ; i += 4) {
147 		reg_offset = (i + offset) - pb_blocks[block_num];
148 		rc |= hl_unset_pb_in_block(hdev, reg_offset,
149 					&sgs_array[block_num]);
150 	}
151 
152 	return rc;
153 }
154 
155 /**
156  * hl_unsecure_registers - locate the relevant block for all registers and
157  *                        remove corresponding protection bit
158  *
159  * @hdev: pointer to hl_device structure
160  * @mm_reg_array: register address array to unsecure
161  * @mm_array_size: register array size
162  * @offset: additional offset to the register address
163  * @pb_blocks: blocks array
164  * @sgs_array: pb array
165  * @blocks_array_size: blocks array size
166  *
167  */
168 int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
169 		int mm_array_size, int offset, const u32 pb_blocks[],
170 		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
171 {
172 	int i, rc = 0;
173 
174 	for (i = 0 ; i < mm_array_size ; i++) {
175 		rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
176 				pb_blocks, sgs_array, blocks_array_size);
177 
178 		if (rc)
179 			return rc;
180 	}
181 
182 	return rc;
183 }
184 
185 /**
186  * hl_unsecure_registers_range - locate the relevant block for all register
187  *                        ranges and remove corresponding protection bit
188  *
189  * @hdev: pointer to hl_device structure
190  * @mm_reg_range_array: register address range array to unsecure
191  * @mm_array_size: register array size
192  * @offset: additional offset to the register address
193  * @pb_blocks: blocks array
194  * @sgs_array: pb array
195  * @blocks_array_size: blocks array size
196  *
197  */
198 static int hl_unsecure_registers_range(struct hl_device *hdev,
199 		const struct range mm_reg_range_array[], int mm_array_size,
200 		int offset, const u32 pb_blocks[],
201 		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
202 {
203 	int i, rc = 0;
204 
205 	for (i = 0 ; i < mm_array_size ; i++) {
206 		rc = hl_unsecure_register_range(hdev, mm_reg_range_array[i],
207 			offset, pb_blocks, sgs_array, blocks_array_size);
208 
209 		if (rc)
210 			return rc;
211 	}
212 
213 	return rc;
214 }
215 
216 /**
217  * hl_ack_pb_security_violations - Ack security violation
218  *
219  * @hdev: pointer to hl_device structure
220  * @pb_blocks: blocks array
221  * @block_offset: additional offset to the block
222  * @array_size: blocks array size
223  *
224  */
225 static void hl_ack_pb_security_violations(struct hl_device *hdev,
226 		const u32 pb_blocks[], u32 block_offset, int array_size)
227 {
228 	int i;
229 	u32 cause, addr, block_base;
230 
231 	for (i = 0 ; i < array_size ; i++) {
232 		block_base = pb_blocks[i] + block_offset;
233 		cause = RREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE);
234 		if (cause) {
235 			addr = RREG32(block_base + HL_BLOCK_GLBL_ERR_ADDR);
236 			hdev->asic_funcs->pb_print_security_errors(hdev,
237 					block_base, cause, addr);
238 			WREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE, cause);
239 		}
240 	}
241 }
242 
243 /**
244  * hl_config_glbl_sec - set pb in HW according to given pb array
245  *
246  * @hdev: pointer to hl_device structure
247  * @pb_blocks: blocks array
248  * @sgs_array: pb array
249  * @block_offset: additional offset to the block
250  * @array_size: blocks array size
251  *
252  */
253 void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
254 		struct hl_block_glbl_sec sgs_array[], u32 block_offset,
255 		int array_size)
256 {
257 	int i, j;
258 	u32 sgs_base;
259 
260 	if (hdev->pldm)
261 		usleep_range(100, 1000);
262 
263 	for (i = 0 ; i < array_size ; i++) {
264 		sgs_base = block_offset + pb_blocks[i] +
265 				HL_BLOCK_GLBL_SEC_OFFS;
266 
267 		for (j = 0 ; j < HL_BLOCK_GLBL_SEC_LEN ; j++)
268 			WREG32(sgs_base + j * sizeof(u32),
269 				sgs_array[i].sec_array[j]);
270 	}
271 }
272 
273 /**
274  * hl_secure_block - locally memsets a block to 0
275  *
276  * @hdev: pointer to hl_device structure
277  * @sgs_array: pb array to clear
278  * @array_size: blocks array size
279  *
280  */
281 void hl_secure_block(struct hl_device *hdev,
282 		struct hl_block_glbl_sec sgs_array[], int array_size)
283 {
284 	int i;
285 
286 	for (i = 0 ; i < array_size ; i++)
287 		memset((char *)(sgs_array[i].sec_array), 0,
288 			HL_BLOCK_GLBL_SEC_SIZE);
289 }
290 
291 /**
292  * hl_init_pb_with_mask - set selected pb instances with mask in HW according
293  *                        to given configuration
294  *
295  * @hdev: pointer to hl_device structure
296  * @num_dcores: number of decores to apply configuration to
297  *              set to HL_PB_SHARED if need to apply only once
298  * @dcore_offset: offset between dcores
299  * @num_instances: number of instances to apply configuration to
300  * @instance_offset: offset between instances
301  * @pb_blocks: blocks array
302  * @blocks_array_size: blocks array size
303  * @user_regs_array: unsecured register array
304  * @user_regs_array_size: unsecured register array size
305  * @mask: enabled instances mask: 1- enabled, 0- disabled
306  */
307 int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
308 		u32 dcore_offset, u32 num_instances, u32 instance_offset,
309 		const u32 pb_blocks[], u32 blocks_array_size,
310 		const u32 *user_regs_array, u32 user_regs_array_size, u64 mask)
311 {
312 	int i, j;
313 	struct hl_block_glbl_sec *glbl_sec;
314 
315 	glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
316 	if (!glbl_sec)
317 		return -ENOMEM;
318 
319 	hl_secure_block(hdev, glbl_sec, blocks_array_size);
320 	hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size, 0,
321 			pb_blocks, glbl_sec, blocks_array_size);
322 
323 	/* Fill all blocks with the same configuration */
324 	for (i = 0 ; i < num_dcores ; i++) {
325 		for (j = 0 ; j < num_instances ; j++) {
326 			int seq = i * num_instances + j;
327 
328 			if (!(mask & BIT_ULL(seq)))
329 				continue;
330 
331 			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
332 					i * dcore_offset + j * instance_offset,
333 					blocks_array_size);
334 		}
335 	}
336 
337 	kfree(glbl_sec);
338 
339 	return 0;
340 }
341 
342 /**
343  * hl_init_pb - set pb in HW according to given configuration
344  *
345  * @hdev: pointer to hl_device structure
346  * @num_dcores: number of decores to apply configuration to
347  *              set to HL_PB_SHARED if need to apply only once
348  * @dcore_offset: offset between dcores
349  * @num_instances: number of instances to apply configuration to
350  * @instance_offset: offset between instances
351  * @pb_blocks: blocks array
352  * @blocks_array_size: blocks array size
353  * @user_regs_array: unsecured register array
354  * @user_regs_array_size: unsecured register array size
355  *
356  */
int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *user_regs_array, u32 user_regs_array_size)
{
	/* Same as hl_init_pb_with_mask() with every instance enabled */
	return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, user_regs_array,
			user_regs_array_size, ULLONG_MAX);
}
367 
368 /**
369  * hl_init_pb_ranges_with_mask - set pb instances using mask in HW according to
370  *                               given configuration unsecurring registers
371  *                               ranges instead of specific registers
372  *
373  * @hdev: pointer to hl_device structure
374  * @num_dcores: number of decores to apply configuration to
375  *              set to HL_PB_SHARED if need to apply only once
376  * @dcore_offset: offset between dcores
377  * @num_instances: number of instances to apply configuration to
378  * @instance_offset: offset between instances
379  * @pb_blocks: blocks array
380  * @blocks_array_size: blocks array size
381  * @user_regs_range_array: unsecured register range array
382  * @user_regs_range_array_size: unsecured register range array size
383  * @mask: enabled instances mask: 1- enabled, 0- disabled
384  */
385 int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
386 		u32 dcore_offset, u32 num_instances, u32 instance_offset,
387 		const u32 pb_blocks[], u32 blocks_array_size,
388 		const struct range *user_regs_range_array,
389 		u32 user_regs_range_array_size, u64 mask)
390 {
391 	int i, j, rc = 0;
392 	struct hl_block_glbl_sec *glbl_sec;
393 
394 	glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
395 	if (!glbl_sec)
396 		return -ENOMEM;
397 
398 	hl_secure_block(hdev, glbl_sec, blocks_array_size);
399 	rc = hl_unsecure_registers_range(hdev, user_regs_range_array,
400 			user_regs_range_array_size, 0, pb_blocks, glbl_sec,
401 			blocks_array_size);
402 	if (rc)
403 		goto free_glbl_sec;
404 
405 	/* Fill all blocks with the same configuration */
406 	for (i = 0 ; i < num_dcores ; i++) {
407 		for (j = 0 ; j < num_instances ; j++) {
408 			int seq = i * num_instances + j;
409 
410 			if (!(mask & BIT_ULL(seq)))
411 				continue;
412 
413 			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
414 					i * dcore_offset + j * instance_offset,
415 					blocks_array_size);
416 		}
417 	}
418 
419 free_glbl_sec:
420 	kfree(glbl_sec);
421 
422 	return rc;
423 }
424 
425 /**
426  * hl_init_pb_ranges - set pb in HW according to given configuration unsecurring
427  *                     registers ranges instead of specific registers
428  *
429  * @hdev: pointer to hl_device structure
430  * @num_dcores: number of decores to apply configuration to
431  *              set to HL_PB_SHARED if need to apply only once
432  * @dcore_offset: offset between dcores
433  * @num_instances: number of instances to apply configuration to
434  * @instance_offset: offset between instances
435  * @pb_blocks: blocks array
436  * @blocks_array_size: blocks array size
437  * @user_regs_range_array: unsecured register range array
438  * @user_regs_range_array_size: unsecured register range array size
439  *
440  */
int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *user_regs_range_array,
		u32 user_regs_range_array_size)
{
	/* Same as hl_init_pb_ranges_with_mask() with every instance enabled */
	return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, user_regs_range_array,
			user_regs_range_array_size, ULLONG_MAX);
}
452 
453 /**
454  * hl_init_pb_single_dcore - set pb for a single docre in HW
455  * according to given configuration
456  *
457  * @hdev: pointer to hl_device structure
458  * @dcore_offset: offset from the dcore0
459  * @num_instances: number of instances to apply configuration to
460  * @instance_offset: offset between instances
461  * @pb_blocks: blocks array
462  * @blocks_array_size: blocks array size
463  * @user_regs_array: unsecured register array
464  * @user_regs_array_size: unsecured register array size
465  *
466  */
467 int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
468 		u32 num_instances, u32 instance_offset,
469 		const u32 pb_blocks[], u32 blocks_array_size,
470 		const u32 *user_regs_array, u32 user_regs_array_size)
471 {
472 	int i, rc = 0;
473 	struct hl_block_glbl_sec *glbl_sec;
474 
475 	glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
476 	if (!glbl_sec)
477 		return -ENOMEM;
478 
479 	hl_secure_block(hdev, glbl_sec, blocks_array_size);
480 	rc = hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size,
481 			0, pb_blocks, glbl_sec, blocks_array_size);
482 	if (rc)
483 		goto free_glbl_sec;
484 
485 	/* Fill all blocks with the same configuration */
486 	for (i = 0 ; i < num_instances ; i++)
487 		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
488 				dcore_offset + i * instance_offset,
489 				blocks_array_size);
490 
491 free_glbl_sec:
492 	kfree(glbl_sec);
493 
494 	return rc;
495 }
496 
497 /**
498  * hl_init_pb_ranges_single_dcore - set pb for a single docre in HW according
499  *                                  to given configuration unsecurring
500  *                                  registers ranges instead of specific
501  *                                  registers
502  *
503  * @hdev: pointer to hl_device structure
504  * @dcore_offset: offset from the dcore0
505  * @num_instances: number of instances to apply configuration to
506  * @instance_offset: offset between instances
507  * @pb_blocks: blocks array
508  * @blocks_array_size: blocks array size
509  * @user_regs_range_array: unsecured register range array
510  * @user_regs_range_array_size: unsecured register range array size
511  *
512  */
513 int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
514 		u32 num_instances, u32 instance_offset,
515 		const u32 pb_blocks[], u32 blocks_array_size,
516 		const struct range *user_regs_range_array, u32 user_regs_range_array_size)
517 {
518 	int i;
519 	struct hl_block_glbl_sec *glbl_sec;
520 
521 	glbl_sec = kzalloc_objs(struct hl_block_glbl_sec, blocks_array_size);
522 	if (!glbl_sec)
523 		return -ENOMEM;
524 
525 	hl_secure_block(hdev, glbl_sec, blocks_array_size);
526 	hl_unsecure_registers_range(hdev, user_regs_range_array,
527 			user_regs_range_array_size, 0, pb_blocks, glbl_sec,
528 			blocks_array_size);
529 
530 	/* Fill all blocks with the same configuration */
531 	for (i = 0 ; i < num_instances ; i++)
532 		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
533 				dcore_offset + i * instance_offset,
534 				blocks_array_size);
535 
536 	kfree(glbl_sec);
537 
538 	return 0;
539 }
540 
541 /**
542  * hl_ack_pb_with_mask - ack pb with mask in HW according to given configuration
543  *
544  * @hdev: pointer to hl_device structure
545  * @num_dcores: number of decores to apply configuration to
546  *              set to HL_PB_SHARED if need to apply only once
547  * @dcore_offset: offset between dcores
548  * @num_instances: number of instances to apply configuration to
549  * @instance_offset: offset between instances
550  * @pb_blocks: blocks array
551  * @blocks_array_size: blocks array size
552  * @mask: enabled instances mask: 1- enabled, 0- disabled
553  *
554  */
555 void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
556 		u32 dcore_offset, u32 num_instances, u32 instance_offset,
557 		const u32 pb_blocks[], u32 blocks_array_size, u64 mask)
558 {
559 	int i, j;
560 
561 	/* ack all blocks */
562 	for (i = 0 ; i < num_dcores ; i++) {
563 		for (j = 0 ; j < num_instances ; j++) {
564 			int seq = i * num_instances + j;
565 
566 			if (!(mask & BIT_ULL(seq)))
567 				continue;
568 
569 			hl_ack_pb_security_violations(hdev, pb_blocks,
570 					i * dcore_offset + j * instance_offset,
571 					blocks_array_size);
572 		}
573 	}
574 }
575 
576 /**
577  * hl_ack_pb - ack pb in HW according to given configuration
578  *
579  * @hdev: pointer to hl_device structure
580  * @num_dcores: number of decores to apply configuration to
581  *              set to HL_PB_SHARED if need to apply only once
582  * @dcore_offset: offset between dcores
583  * @num_instances: number of instances to apply configuration to
584  * @instance_offset: offset between instances
585  * @pb_blocks: blocks array
586  * @blocks_array_size: blocks array size
587  *
588  */
void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	/* Same as hl_ack_pb_with_mask() with every instance enabled */
	hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
			instance_offset, pb_blocks, blocks_array_size,
			ULLONG_MAX);
}
597 
598 /**
599  * hl_ack_pb_single_dcore - ack pb for single docre in HW
600  * according to given configuration
601  *
602  * @hdev: pointer to hl_device structure
603  * @dcore_offset: offset from dcore0
604  * @num_instances: number of instances to apply configuration to
605  * @instance_offset: offset between instances
606  * @pb_blocks: blocks array
607  * @blocks_array_size: blocks array size
608  *
609  */
610 void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
611 		u32 num_instances, u32 instance_offset,
612 		const u32 pb_blocks[], u32 blocks_array_size)
613 {
614 	int i;
615 
616 	/* ack all blocks */
617 	for (i = 0 ; i < num_instances ; i++)
618 		hl_ack_pb_security_violations(hdev, pb_blocks,
619 				dcore_offset + i * instance_offset,
620 				blocks_array_size);
621 
622 }
623 
624 static u32 hl_automated_get_block_base_addr(struct hl_device *hdev,
625 		struct hl_special_block_info *block_info,
626 		u32 major, u32 minor, u32 sub_minor)
627 {
628 	u32 fw_block_base_address = block_info->base_addr +
629 			major * block_info->major_offset +
630 			minor * block_info->minor_offset +
631 			sub_minor * block_info->sub_minor_offset;
632 	struct asic_fixed_properties *prop = &hdev->asic_prop;
633 
634 	/* Calculation above returns an address for FW use, and therefore should
635 	 * be casted for driver use.
636 	 */
637 	return (fw_block_base_address - lower_32_bits(prop->cfg_base_address));
638 }
639 
640 static bool hl_check_block_type_exclusion(struct hl_skip_blocks_cfg *skip_blocks_cfg,
641 		int block_type)
642 {
643 	int i;
644 
645 	/* Check if block type is listed in the exclusion list of block types */
646 	for (i = 0 ; i < skip_blocks_cfg->block_types_len ; i++)
647 		if (block_type == skip_blocks_cfg->block_types[i])
648 			return true;
649 
650 	return false;
651 }
652 
653 static bool hl_check_block_range_exclusion(struct hl_device *hdev,
654 		struct hl_skip_blocks_cfg *skip_blocks_cfg,
655 		struct hl_special_block_info *block_info,
656 		u32 major, u32 minor, u32 sub_minor)
657 {
658 	u32 blocks_in_range, block_base_addr_in_range, block_base_addr;
659 	int i, j;
660 
661 	block_base_addr = hl_automated_get_block_base_addr(hdev, block_info,
662 			major, minor, sub_minor);
663 
664 	for (i = 0 ; i < skip_blocks_cfg->block_ranges_len ; i++) {
665 		blocks_in_range = (skip_blocks_cfg->block_ranges[i].end -
666 				skip_blocks_cfg->block_ranges[i].start) /
667 				HL_BLOCK_SIZE + 1;
668 		for (j = 0 ; j < blocks_in_range ; j++) {
669 			block_base_addr_in_range = skip_blocks_cfg->block_ranges[i].start +
670 					j * HL_BLOCK_SIZE;
671 			if (block_base_addr == block_base_addr_in_range)
672 				return true;
673 		}
674 	}
675 
676 	return false;
677 }
678 
679 static int hl_read_glbl_errors(struct hl_device *hdev,
680 		u32 blk_idx, u32 major, u32 minor, u32 sub_minor, void *data)
681 {
682 	struct asic_fixed_properties *prop = &hdev->asic_prop;
683 	struct hl_special_block_info *special_blocks = prop->special_blocks;
684 	struct hl_special_block_info *current_block = &special_blocks[blk_idx];
685 	u32 glbl_err_addr, glbl_err_cause, addr_val, cause_val, block_base,
686 		base = current_block->base_addr - lower_32_bits(prop->cfg_base_address);
687 	int i;
688 
689 	block_base = base + major * current_block->major_offset +
690 			minor * current_block->minor_offset +
691 			sub_minor * current_block->sub_minor_offset;
692 
693 	glbl_err_cause = block_base + HL_GLBL_ERR_CAUSE_OFFSET;
694 	cause_val = RREG32(glbl_err_cause);
695 	if (!cause_val)
696 		return 0;
697 
698 	glbl_err_addr = block_base + HL_GLBL_ERR_ADDR_OFFSET;
699 	addr_val = RREG32(glbl_err_addr);
700 
701 	for (i = 0 ; i <= prop->glbl_err_max_cause_num ; i++) {
702 		if (cause_val & BIT(i))
703 			dev_err_ratelimited(hdev->dev,
704 					"%s, addr %#llx\n",
705 					hl_glbl_error_cause[i],
706 					prop->cfg_base_address + block_base +
707 						FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
708 	}
709 
710 	WREG32(glbl_err_cause, cause_val);
711 
712 	return 0;
713 }
714 
715 void hl_check_for_glbl_errors(struct hl_device *hdev)
716 {
717 	struct asic_fixed_properties *prop = &hdev->asic_prop;
718 	struct hl_special_blocks_cfg special_blocks_cfg;
719 	struct iterate_special_ctx glbl_err_iter;
720 	int rc;
721 
722 	memset(&special_blocks_cfg, 0, sizeof(special_blocks_cfg));
723 	special_blocks_cfg.skip_blocks_cfg = &prop->skip_special_blocks_cfg;
724 
725 	glbl_err_iter.fn = &hl_read_glbl_errors;
726 	glbl_err_iter.data = &special_blocks_cfg;
727 
728 	rc = hl_iterate_special_blocks(hdev, &glbl_err_iter);
729 	if (rc)
730 		dev_err_ratelimited(hdev->dev,
731 			"Could not iterate special blocks, glbl error check failed\n");
732 }
733 
/**
 * hl_iterate_special_blocks - invoke ctx->fn on every non-excluded
 *                             (major, minor, sub_minor) instance of every
 *                             special block
 *
 * @hdev: pointer to hl_device structure
 * @ctx: iteration context holding the callback (fn) and its opaque data,
 *       where data is expected to be a struct hl_special_blocks_cfg
 *
 * Returns 0 on success, -EINVAL if no special-blocks array exists, or the
 * first non-zero value returned by the callback (iteration stops there).
 */
int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx)
{
	struct hl_special_blocks_cfg *special_blocks_cfg =
			(struct hl_special_blocks_cfg *)ctx->data;
	struct hl_skip_blocks_cfg *skip_blocks_cfg =
			special_blocks_cfg->skip_blocks_cfg;
	u32 major, minor, sub_minor, blk_idx, num_blocks;
	struct hl_special_block_info *block_info_arr;
	int rc;

	block_info_arr = hdev->asic_prop.special_blocks;
	if (!block_info_arr)
		return -EINVAL;

	num_blocks = hdev->asic_prop.num_of_special_blocks;

	for (blk_idx = 0 ; blk_idx < num_blocks ; blk_idx++, block_info_arr++) {
		/* Skip whole block types listed in the exclusion config */
		if (hl_check_block_type_exclusion(skip_blocks_cfg, block_info_arr->block_type))
			continue;

		for (major = 0 ; major < block_info_arr->major ; major++) {
			minor = 0;
			/* do/while so minor==0 and sub_minor==0 counts still
			 * yield one visit at index 0
			 */
			do {
				sub_minor = 0;
				do {
					/* Skip instances in an excluded address range or
					 * rejected by the optional hook; sub_minor is
					 * advanced before the continue so the loop makes
					 * progress.
					 */
					if ((hl_check_block_range_exclusion(hdev,
							skip_blocks_cfg, block_info_arr,
							major, minor, sub_minor)) ||
						(skip_blocks_cfg->skip_block_hook &&
						skip_blocks_cfg->skip_block_hook(hdev,
							special_blocks_cfg,
							blk_idx, major, minor, sub_minor))) {
						sub_minor++;
						continue;
					}

					rc = ctx->fn(hdev, blk_idx, major, minor,
								sub_minor, ctx->data);
					if (rc)
						return rc;

					sub_minor++;
				} while (sub_minor < block_info_arr->sub_minor);

				minor++;
			} while (minor < block_info_arr->minor);
		}
	}

	return 0;
}
785