xref: /linux/drivers/accel/habanalabs/gaudi2/gaudi2P.h (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * Copyright 2020-2022 HabanaLabs, Ltd.
4  * All Rights Reserved.
5  *
6  */
7 
8 #ifndef GAUDI2P_H_
9 #define GAUDI2P_H_
10 
11 #include <uapi/drm/habanalabs_accel.h>
12 #include "../common/habanalabs.h"
13 #include <linux/habanalabs/hl_boot_if.h>
14 #include "../include/gaudi2/gaudi2.h"
15 #include "../include/gaudi2/gaudi2_packets.h"
16 #include "../include/gaudi2/gaudi2_fw_if.h"
17 #include "../include/gaudi2/gaudi2_async_events.h"
18 
#define GAUDI2_LINUX_FW_FILE	"habanalabs/gaudi2/gaudi2-fit.itb"
#define GAUDI2_BOOT_FIT_FILE	"habanalabs/gaudi2/gaudi2-boot-fit.itb"

#define GAUDI2_CPU_TIMEOUT_USEC		30000000	/* 30s */

/* Number of QMAN-backed queues per engine type */
#define NUMBER_OF_PDMA_QUEUES		2
#define NUMBER_OF_EDMA_QUEUES		8
#define NUMBER_OF_MME_QUEUES		4
#define NUMBER_OF_TPC_QUEUES		25
#define NUMBER_OF_NIC_QUEUES		24
#define NUMBER_OF_ROT_QUEUES		2
#define NUMBER_OF_CPU_QUEUES		1

/*
 * Each QMAN exposes NUM_OF_PQ_PER_QMAN streams, so the total H/W queue count
 * is the sum of all engine QMANs multiplied by that factor.
 */
#define NUMBER_OF_HW_QUEUES		((NUMBER_OF_PDMA_QUEUES + \
					NUMBER_OF_EDMA_QUEUES + \
					NUMBER_OF_MME_QUEUES + \
					NUMBER_OF_TPC_QUEUES + \
					NUMBER_OF_NIC_QUEUES + \
					NUMBER_OF_ROT_QUEUES + \
					NUMBER_OF_CPU_QUEUES) * \
					NUM_OF_PQ_PER_QMAN)

/* Total: the single CPU queue plus all H/W queues */
#define NUMBER_OF_QUEUES		(NUMBER_OF_CPU_QUEUES + NUMBER_OF_HW_QUEUES)
42 
/*
 * Number of sync objects per dcore, derived from the span of the SM OBJS
 * registers (4 bytes per object).
 */
#define DCORE_NUM_OF_SOB		\
	(((mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8191 - \
	mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)

/*
 * Number of monitors per dcore, derived from the span of the MON_STATUS
 * registers (4 bytes per monitor).
 */
#define DCORE_NUM_OF_MONITORS		\
	(((mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2047 - \
	mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0) + 4) >> 2)

/* All dcore decoders plus the PCIe decoders */
#define NUMBER_OF_DEC		((NUM_OF_DEC_PER_DCORE * NUM_OF_DCORES) + NUMBER_OF_PCIE_DEC)

/* Map all arcs dccm + arc schedulers acp blocks */
#define NUM_OF_USER_ACP_BLOCKS		(NUM_OF_SCHEDULER_ARC + 2)
#define NUM_OF_USER_NIC_UMR_BLOCKS	15
#define NUM_OF_EXPOSED_SM_BLOCKS	((NUM_OF_DCORES - 1) * 2)
#define NUM_USER_MAPPED_BLOCKS \
	(NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + NUMBER_OF_DEC + \
	NUM_OF_EXPOSED_SM_BLOCKS + \
	(NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))

/* Within the user mapped array, decoder entries start after all the ARC
 * related entries
 */
#define USR_MAPPED_BLK_DEC_START_IDX \
	(NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + \
	(NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))

/* Sync manager entries follow the decoder entries in the user mapped array */
#define USR_MAPPED_BLK_SM_START_IDX \
	(NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + NUMBER_OF_DEC + \
	(NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))

#define SM_OBJS_BLOCK_SIZE		(mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_0 - \
					 mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0)
75 
/* Maximum number of in-flight command submissions */
#define GAUDI2_MAX_PENDING_CS		64

#if !IS_MAX_PENDING_CS_VALID(GAUDI2_MAX_PENDING_CS)
#error "GAUDI2_MAX_PENDING_CS must be power of 2 and greater than 1"
#endif

#define CORESIGHT_TIMEOUT_USEC			100000		/* 100 ms */

#define GAUDI2_PREBOOT_REQ_TIMEOUT_USEC		25000000	/* 25s */
#define GAUDI2_PREBOOT_EXTENDED_REQ_TIMEOUT_USEC 85000000	/* 85s */

#define GAUDI2_BOOT_FIT_REQ_TIMEOUT_USEC	10000000	/* 10s */

#define GAUDI2_NIC_CLK_FREQ			450000000ull	/* 450 MHz */

#define DC_POWER_DEFAULT			60000		/* 60W */

#define GAUDI2_HBM_NUM				6

#define DMA_MAX_TRANSFER_SIZE			U32_MAX

#define GAUDI2_DEFAULT_CARD_NAME		"HL225"

#define QMAN_STREAMS				4

#define NUM_OF_MME_SBTE_PORTS			5
#define NUM_OF_MME_WB_PORTS			2

/* Engine-id distance between the same engine in two consecutive dcores */
#define GAUDI2_ENGINE_ID_DCORE_OFFSET \
	(GAUDI2_DCORE1_ENGINE_ID_EDMA_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)
106 
/* DRAM Memory Map */

#define CPU_FW_IMAGE_SIZE			0x10000000	/* 256MB */
#define CPU_FW_IMAGE_ADDR			DRAM_PHYS_BASE
#define PMMU_PAGE_TABLES_SIZE			0x10000000      /* 256MB */
#define EDMA_PQS_SIZE				SZ_2M
#define EDMA_SCRATCHPAD_SIZE			SZ_1M
#define HMMU_PAGE_TABLES_SIZE			SZ_1M

#define NIC_NUMBER_OF_PORTS			NIC_NUMBER_OF_ENGINES

#define NUMBER_OF_PCIE_DEC			2
#define PCIE_DEC_SHIFT				8

#define SRAM_USER_BASE_OFFSET			0

/* cluster binning */
#define MAX_FAULTY_HBMS				1
/* Full (non-binned) instance masks: 4 XBAR edges, 8 EDMAs, 6 HBMs */
#define GAUDI2_XBAR_EDGE_FULL_MASK		0xF
#define GAUDI2_EDMA_FULL_MASK			0xFF
#define GAUDI2_DRAM_FULL_MASK			0x3F
128 
/* Host virtual address space. Split into a regular-page range and a
 * huge-page range, laid out back to back.
 */

#define VA_HOST_SPACE_PAGE_START		0xFFF0000000000000ull
#define VA_HOST_SPACE_PAGE_END			0xFFF0800000000000ull /* 140TB */

#define VA_HOST_SPACE_HPAGE_START		0xFFF0800000000000ull
#define VA_HOST_SPACE_HPAGE_END			0xFFF1000000000000ull /* 140TB */

/* 140TB */
#define VA_HOST_SPACE_PAGE_SIZE		(VA_HOST_SPACE_PAGE_END - VA_HOST_SPACE_PAGE_START)

/* 140TB */
#define VA_HOST_SPACE_HPAGE_SIZE	(VA_HOST_SPACE_HPAGE_END - VA_HOST_SPACE_HPAGE_START)

#define VA_HOST_SPACE_SIZE		(VA_HOST_SPACE_PAGE_SIZE + VA_HOST_SPACE_HPAGE_SIZE)

#define HOST_SPACE_INTERNAL_CB_SZ		SZ_2M

/*
 * HBM virtual address space
 * Gaudi2 has 6 HBM devices, each supporting 16GB total of 96GB at most.
 * No core separation is supported so we can have one chunk of virtual address
 * space just above the physical ones.
 * The virtual address space starts immediately after the end of the physical
 * address space which is determined at run-time.
 */
#define VA_HBM_SPACE_END		0x1002000000000000ull
156 
/*
 * H/W capability bits. These populate the 64-bit hw_cap_initialized word in
 * struct gaudi2_device; a bit is set once the corresponding module is
 * initialized and cleared on its reset.
 */
#define HW_CAP_PLL			BIT_ULL(0)
#define HW_CAP_DRAM			BIT_ULL(1)
#define HW_CAP_PMMU			BIT_ULL(2)
#define HW_CAP_CPU			BIT_ULL(3)
#define HW_CAP_MSIX			BIT_ULL(4)

#define HW_CAP_CPU_Q			BIT_ULL(5)
#define HW_CAP_CPU_Q_SHIFT		5

#define HW_CAP_CLK_GATE			BIT_ULL(6)
#define HW_CAP_KDMA			BIT_ULL(7)
#define HW_CAP_SRAM_SCRAMBLER		BIT_ULL(8)

/* One bit per HMMU: 4 dcores x 4 HMMUs, bits 9..24 */
#define HW_CAP_DCORE0_DMMU0		BIT_ULL(9)
#define HW_CAP_DCORE0_DMMU1		BIT_ULL(10)
#define HW_CAP_DCORE0_DMMU2		BIT_ULL(11)
#define HW_CAP_DCORE0_DMMU3		BIT_ULL(12)
#define HW_CAP_DCORE1_DMMU0		BIT_ULL(13)
#define HW_CAP_DCORE1_DMMU1		BIT_ULL(14)
#define HW_CAP_DCORE1_DMMU2		BIT_ULL(15)
#define HW_CAP_DCORE1_DMMU3		BIT_ULL(16)
#define HW_CAP_DCORE2_DMMU0		BIT_ULL(17)
#define HW_CAP_DCORE2_DMMU1		BIT_ULL(18)
#define HW_CAP_DCORE2_DMMU2		BIT_ULL(19)
#define HW_CAP_DCORE2_DMMU3		BIT_ULL(20)
#define HW_CAP_DCORE3_DMMU0		BIT_ULL(21)
#define HW_CAP_DCORE3_DMMU1		BIT_ULL(22)
#define HW_CAP_DCORE3_DMMU2		BIT_ULL(23)
#define HW_CAP_DCORE3_DMMU3		BIT_ULL(24)
#define HW_CAP_DMMU_MASK		GENMASK_ULL(24, 9)
#define HW_CAP_DMMU_SHIFT		9
/* NOTE(review): bit 25 is left unused between the DMMU mask and PDMA */
#define HW_CAP_PDMA_MASK		BIT_ULL(26)
#define HW_CAP_EDMA_MASK		GENMASK_ULL(34, 27)
#define HW_CAP_EDMA_SHIFT		27
#define HW_CAP_MME_MASK			GENMASK_ULL(38, 35)
#define HW_CAP_MME_SHIFT		35
#define HW_CAP_ROT_MASK			GENMASK_ULL(40, 39)
#define HW_CAP_ROT_SHIFT		39
#define HW_CAP_HBM_SCRAMBLER_HW_RESET	BIT_ULL(41)
#define HW_CAP_HBM_SCRAMBLER_SW_RESET	BIT_ULL(42)
#define HW_CAP_HBM_SCRAMBLER_MASK	(HW_CAP_HBM_SCRAMBLER_HW_RESET | \
						HW_CAP_HBM_SCRAMBLER_SW_RESET)
#define HW_CAP_HBM_SCRAMBLER_SHIFT	41
/*
 * Use BIT_ULL for consistency with the rest of this 64-bit capability word;
 * plain BIT() operates on unsigned long, which overflows at bit 43 on
 * 32-bit builds.
 */
#define HW_CAP_RESERVED			BIT_ULL(43)
#define HW_CAP_MMU_MASK			(HW_CAP_PMMU | HW_CAP_DMMU_MASK)
202 
/* Range Registers */
#define RR_TYPE_SHORT			0
#define RR_TYPE_LONG			1
#define RR_TYPE_SHORT_PRIV		2
#define RR_TYPE_LONG_PRIV		3
#define NUM_SHORT_LBW_RR		14
#define NUM_LONG_LBW_RR			4
#define NUM_SHORT_HBW_RR		6
#define NUM_LONG_HBW_RR			4

/* RAZWI initiator coordinates- X- 5 bits, Y- 4 bits */
#define RAZWI_INITIATOR_X_SHIFT		0
#define RAZWI_INITIATOR_X_MASK		0x1F
#define RAZWI_INITIATOR_Y_SHIFT		5
#define RAZWI_INITIATOR_Y_MASK		0xF

/* Pack router grid coordinates into a single initiator id */
#define RTR_ID_X_Y(x, y) \
	((((y) & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) | \
		(((x) & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT))

/* decoders have separate mask */
#define HW_CAP_DEC_SHIFT		0
#define HW_CAP_DEC_MASK			GENMASK_ULL(9, 0)

/* TPCs have separate mask */
#define HW_CAP_TPC_SHIFT		0
#define HW_CAP_TPC_MASK			GENMASK_ULL(24, 0)

/* nics have separate mask */
#define HW_CAP_NIC_SHIFT		0
#define HW_CAP_NIC_MASK			GENMASK_ULL(NIC_NUMBER_OF_ENGINES - 1, 0)

/* Extract bits 49:28 of an address, for programming ARC PCI MSB registers */
#define GAUDI2_ARC_PCI_MSB_ADDR(addr)	(((addr) & GENMASK_ULL(49, 28)) >> 28)

/* SOB write value that atomically increments the sync object by one */
#define GAUDI2_SOB_INCREMENT_BY_ONE	(FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1) | \
					FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1))

/* All queues between PDMA0_0 and the CPU PQ are sanity-tested by the driver */
#define GAUDI2_NUM_TESTED_QS		(GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
241 
242 
/* Printable-name tables, indexed by engine/queue id */
extern const char *gaudi2_engine_id_str[];
extern const char *gaudi2_queue_id_str[];

/* Translate an id to its printable name, guarding against out-of-range ids */
#define GAUDI2_ENG_ID_TO_STR(initiator) ((initiator) >= GAUDI2_ENGINE_ID_SIZE ? "not found" :	\
						gaudi2_engine_id_str[initiator])

#define GAUDI2_QUEUE_ID_TO_STR(initiator) ((initiator) >= GAUDI2_QUEUE_ID_SIZE ? "not found" :	\
						gaudi2_queue_id_str[initiator])
251 
/*
 * Driver-reserved sync objects: one per in-flight CS for completion tracking,
 * one for KDMA, and one per decoder for each of normal/abnormal completion.
 */
enum gaudi2_reserved_sob_id {
	GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
	GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
			GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST + GAUDI2_MAX_PENDING_CS - 1,
	GAUDI2_RESERVED_SOB_KDMA_COMPLETION,
	GAUDI2_RESERVED_SOB_DEC_NRM_FIRST,
	GAUDI2_RESERVED_SOB_DEC_NRM_LAST =
			GAUDI2_RESERVED_SOB_DEC_NRM_FIRST + NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST,
	GAUDI2_RESERVED_SOB_DEC_ABNRM_LAST =
			GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST + NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_SOB_NUMBER
};
265 
/*
 * Driver-reserved sync manager monitors: one per in-flight CS, one for KDMA,
 * and three per decoder for each of normal/abnormal completion.
 */
enum gaudi2_reserved_mon_id {
	GAUDI2_RESERVED_MON_CS_COMPLETION_FIRST,
	GAUDI2_RESERVED_MON_CS_COMPLETION_LAST =
			GAUDI2_RESERVED_MON_CS_COMPLETION_FIRST + GAUDI2_MAX_PENDING_CS - 1,
	GAUDI2_RESERVED_MON_KDMA_COMPLETION,
	GAUDI2_RESERVED_MON_DEC_NRM_FIRST,
	GAUDI2_RESERVED_MON_DEC_NRM_LAST =
			GAUDI2_RESERVED_MON_DEC_NRM_FIRST + 3 * NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_MON_DEC_ABNRM_FIRST,
	GAUDI2_RESERVED_MON_DEC_ABNRM_LAST =
			GAUDI2_RESERVED_MON_DEC_ABNRM_FIRST + 3 * NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_MON_NUMBER
};
279 
/* Driver-reserved completion queues */
enum gaudi2_reserved_cq_id {
	GAUDI2_RESERVED_CQ_CS_COMPLETION,
	GAUDI2_RESERVED_CQ_KDMA_COMPLETION,
	GAUDI2_RESERVED_CQ_NUMBER
};
285 
/*
 * Gaudi2 substitute TPCs numbering.
 * At most two faulty TPCs are allowed.
 * The first replacement for a faulty TPC is TPC24; the second is TPC23.
 */
/* NOTE(review): "substitude" is a typo kept for ABI/source compatibility */
enum substitude_tpc {
	FAULTY_TPC_SUBTS_1_TPC_24,
	FAULTY_TPC_SUBTS_2_TPC_23,
	MAX_FAULTY_TPCS
};
296 
/* DMA cores; used to index gaudi2_dma_core_blocks_bases */
enum gaudi2_dma_core_id {
	DMA_CORE_ID_PDMA0, /* Dcore 0 */
	DMA_CORE_ID_PDMA1, /* Dcore 0 */
	DMA_CORE_ID_EDMA0, /* Dcore 0 */
	DMA_CORE_ID_EDMA1, /* Dcore 0 */
	DMA_CORE_ID_EDMA2, /* Dcore 1 */
	DMA_CORE_ID_EDMA3, /* Dcore 1 */
	DMA_CORE_ID_EDMA4, /* Dcore 2 */
	DMA_CORE_ID_EDMA5, /* Dcore 2 */
	DMA_CORE_ID_EDMA6, /* Dcore 3 */
	DMA_CORE_ID_EDMA7, /* Dcore 3 */
	DMA_CORE_ID_KDMA, /* Dcore 0 */
	DMA_CORE_ID_SIZE
};
311 
/* Rotator engines; used to index gaudi2_rot_blocks_bases */
enum gaudi2_rotator_id {
	ROTATOR_ID_0,
	ROTATOR_ID_1,
	ROTATOR_ID_SIZE,
};
317 
/* MME engines, one per dcore; used to index the MME block-base tables */
enum gaudi2_mme_id {
	MME_ID_DCORE0,
	MME_ID_DCORE1,
	MME_ID_DCORE2,
	MME_ID_DCORE3,
	MME_ID_SIZE,
};
325 
/* TPC engines: 6 per dcore plus the single PCI TPC (DCORE0_TPC6) */
enum gaudi2_tpc_id {
	TPC_ID_DCORE0_TPC0,
	TPC_ID_DCORE0_TPC1,
	TPC_ID_DCORE0_TPC2,
	TPC_ID_DCORE0_TPC3,
	TPC_ID_DCORE0_TPC4,
	TPC_ID_DCORE0_TPC5,
	TPC_ID_DCORE1_TPC0,
	TPC_ID_DCORE1_TPC1,
	TPC_ID_DCORE1_TPC2,
	TPC_ID_DCORE1_TPC3,
	TPC_ID_DCORE1_TPC4,
	TPC_ID_DCORE1_TPC5,
	TPC_ID_DCORE2_TPC0,
	TPC_ID_DCORE2_TPC1,
	TPC_ID_DCORE2_TPC2,
	TPC_ID_DCORE2_TPC3,
	TPC_ID_DCORE2_TPC4,
	TPC_ID_DCORE2_TPC5,
	TPC_ID_DCORE3_TPC0,
	TPC_ID_DCORE3_TPC1,
	TPC_ID_DCORE3_TPC2,
	TPC_ID_DCORE3_TPC3,
	TPC_ID_DCORE3_TPC4,
	TPC_ID_DCORE3_TPC5,
	/* the PCI TPC is placed last (mapped liked HW) */
	TPC_ID_DCORE0_TPC6,
	TPC_ID_SIZE,
};
355 
/* Video decoders: 2 per dcore plus 2 PCIe decoders */
enum gaudi2_dec_id {
	DEC_ID_DCORE0_DEC0,
	DEC_ID_DCORE0_DEC1,
	DEC_ID_DCORE1_DEC0,
	DEC_ID_DCORE1_DEC1,
	DEC_ID_DCORE2_DEC0,
	DEC_ID_DCORE2_DEC1,
	DEC_ID_DCORE3_DEC0,
	DEC_ID_DCORE3_DEC1,
	DEC_ID_PCIE_VDEC0,
	DEC_ID_PCIE_VDEC1,
	DEC_ID_SIZE,
};
369 
/* HBM device ids (GAUDI2_HBM_NUM devices) */
enum gaudi2_hbm_id {
	HBM_ID0,
	HBM_ID1,
	HBM_ID2,
	HBM_ID3,
	HBM_ID4,
	HBM_ID5,
	HBM_ID_SIZE,
};
379 
/* specific EDMA enumeration: two instances per dcore */
enum gaudi2_edma_id {
	EDMA_ID_DCORE0_INSTANCE0,
	EDMA_ID_DCORE0_INSTANCE1,
	EDMA_ID_DCORE1_INSTANCE0,
	EDMA_ID_DCORE1_INSTANCE1,
	EDMA_ID_DCORE2_INSTANCE0,
	EDMA_ID_DCORE2_INSTANCE1,
	EDMA_ID_DCORE3_INSTANCE0,
	EDMA_ID_DCORE3_INSTANCE1,
	EDMA_ID_SIZE,
};
392 
/* User interrupt count is aligned with HW CQ count.
 * We have 64 CQ's per dcore, CQ0 in dcore 0 is reserved for legacy mode
 */
#define GAUDI2_NUM_USER_INTERRUPTS 64
#define GAUDI2_NUM_RESERVED_INTERRUPTS 1
/* User interrupts plus the reserved (unexpected-error) interrupt */
#define GAUDI2_TOTAL_USER_INTERRUPTS (GAUDI2_NUM_USER_INTERRUPTS + GAUDI2_NUM_RESERVED_INTERRUPTS)
399 
/* MSI-X vector assignment (GAUDI2_MSIX_ENTRIES total) */
enum gaudi2_irq_num {
	GAUDI2_IRQ_NUM_EVENT_QUEUE = GAUDI2_EVENT_QUEUE_MSIX_IDX,
	/* Per-decoder normal/abnormal completion interrupts */
	GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM,
	GAUDI2_IRQ_NUM_DCORE0_DEC0_ABNRM,
	GAUDI2_IRQ_NUM_DCORE0_DEC1_NRM,
	GAUDI2_IRQ_NUM_DCORE0_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_DCORE1_DEC0_NRM,
	GAUDI2_IRQ_NUM_DCORE1_DEC0_ABNRM,
	GAUDI2_IRQ_NUM_DCORE1_DEC1_NRM,
	GAUDI2_IRQ_NUM_DCORE1_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_DCORE2_DEC0_NRM,
	GAUDI2_IRQ_NUM_DCORE2_DEC0_ABNRM,
	GAUDI2_IRQ_NUM_DCORE2_DEC1_NRM,
	GAUDI2_IRQ_NUM_DCORE2_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_DCORE3_DEC0_NRM,
	GAUDI2_IRQ_NUM_DCORE3_DEC0_ABNRM,
	GAUDI2_IRQ_NUM_DCORE3_DEC1_NRM,
	GAUDI2_IRQ_NUM_DCORE3_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_SHARED_DEC0_NRM,
	GAUDI2_IRQ_NUM_SHARED_DEC0_ABNRM,
	GAUDI2_IRQ_NUM_SHARED_DEC1_NRM,
	GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_DEC_LAST = GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_COMPLETION,
	GAUDI2_IRQ_NUM_NIC_PORT_FIRST,
	GAUDI2_IRQ_NUM_NIC_PORT_LAST = (GAUDI2_IRQ_NUM_NIC_PORT_FIRST + NIC_NUMBER_OF_PORTS - 1),
	GAUDI2_IRQ_NUM_TPC_ASSERT,
	GAUDI2_IRQ_NUM_EQ_ERROR,
	/* User interrupt range, followed by reserved vectors */
	GAUDI2_IRQ_NUM_USER_FIRST,
	GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1),
	GAUDI2_IRQ_NUM_RESERVED_FIRST,
	GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_NUM_RESERVED_INTERRUPTS - 1),
	GAUDI2_IRQ_NUM_UNEXPECTED_ERROR = RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT,
	GAUDI2_IRQ_NUM_LAST = (GAUDI2_MSIX_ENTRIES - 1)
};

/* User interrupts must come strictly after all decoder interrupts */
static_assert(GAUDI2_IRQ_NUM_USER_FIRST > GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM);
437 
/**
 * struct dup_block_ctx - context to initialize unit instances across multiple
 *                        blocks where a block can be either a dcore or a
 *                        duplicated common module. this code relies on constant
 *                        offsets of blocks and unit instances in a block.
 * @instance_cfg_fn: instance specific configuration function.
 * @data: private configuration data.
 * @base: base address of the first instance in the first block.
 * @block_off: subsequent blocks address spacing.
 * @instance_off: subsequent block's instances address spacing.
 * @enabled_mask: mask of enabled instances (1- enabled, 0- disabled).
 * @blocks: number of blocks.
 * @instances: unit instances per block.
 */
struct dup_block_ctx {
	void (*instance_cfg_fn)(struct hl_device *hdev, u64 base, void *data);
	void *data;
	u64 base;
	u64 block_off;
	u64 instance_off;
	u64 enabled_mask;
	unsigned int blocks;
	unsigned int instances;
};
462 
/**
 * struct gaudi2_queues_test_info - Holds the addresses of the message used for
 *                                  testing a device queue.
 * @dma_addr: the address used by the HW for accessing the message.
 * @kern_addr: The address used by the driver for accessing the message.
 */
struct gaudi2_queues_test_info {
	dma_addr_t dma_addr;
	void *kern_addr;
};
473 
/**
 * struct gaudi2_device - ASIC specific manage structure.
 * @cpucp_info_get: get information on device from CPU-CP
 * @mapped_blocks: array that holds the base address and size of all blocks
 *                 the user can map.
 * @lfsr_rand_seeds: array of MME ACC random seeds to set.
 * @hw_queues_lock: protects the H/W queues from concurrent access.
 * @scratchpad_kernel_address: general purpose PAGE_SIZE contiguous memory,
 *                             this memory region should be write-only.
 *                             currently used for HBW QMAN writes which is
 *                             redundant.
 * @scratchpad_bus_address: scratchpad bus address
 * @virt_msix_db_cpu_addr: host memory page for the virtual MSI-X doorbell.
 * @virt_msix_db_dma_addr: bus address of the page for the virtual MSI-X doorbell.
 * @dram_bar_cur_addr: current address of DRAM PCI bar.
 * @hw_cap_initialized: This field contains a bit per H/W engine. When that
 *                      engine is initialized, that bit is set by the driver to
 *                      signal we can use this engine in later code paths.
 *                      Each bit is cleared upon reset of its corresponding H/W
 *                      engine.
 * @active_hw_arc: This field contains a bit per ARC of an H/W engine with
 *                 exception of TPC and NIC engines. Once an engine arc is
 *                 initialized, its respective bit is set. Driver can uniquely
 *                 identify each initialized ARC and use this information in
 *                 later code paths. Each respective bit is cleared upon reset
 *                 of its corresponding ARC of the H/W engine.
 * @dec_hw_cap_initialized: This field contains a bit per decoder H/W engine.
 *                      When that engine is initialized, that bit is set by
 *                      the driver to signal we can use this engine in later
 *                      code paths.
 *                      Each bit is cleared upon reset of its corresponding H/W
 *                      engine.
 * @tpc_hw_cap_initialized: This field contains a bit per TPC H/W engine.
 *                      When that engine is initialized, that bit is set by
 *                      the driver to signal we can use this engine in later
 *                      code paths.
 *                      Each bit is cleared upon reset of its corresponding H/W
 *                      engine.
 * @active_tpc_arc: This field contains a bit per ARC of the TPC engines.
 *                  Once an engine arc is initialized, its respective bit is
 *                  set. Each respective bit is cleared upon reset of its
 *                  corresponding ARC of the TPC engine.
 * @nic_hw_cap_initialized: This field contains a bit per nic H/W engine.
 * @active_nic_arc: This field contains a bit per ARC of the NIC engines.
 *                  Once an engine arc is initialized, its respective bit is
 *                  set. Each respective bit is cleared upon reset of its
 *                  corresponding ARC of the NIC engine.
 * @hw_events: array that holds all H/W events that are defined valid.
 * @events_stat: array that holds histogram of all received events.
 * @events_stat_aggregate: same as events_stat but doesn't get cleared on reset.
 * @num_of_valid_hw_events: used to hold the number of valid H/W events.
 * @queues_test_info: information used by the driver when testing the HW queues.
 */
struct gaudi2_device {
	int (*cpucp_info_get)(struct hl_device *hdev);

	struct user_mapped_block	mapped_blocks[NUM_USER_MAPPED_BLOCKS];
	int				lfsr_rand_seeds[MME_NUM_OF_LFSR_SEEDS];

	spinlock_t			hw_queues_lock;

	void				*scratchpad_kernel_address;
	dma_addr_t			scratchpad_bus_address;

	void				*virt_msix_db_cpu_addr;
	dma_addr_t			virt_msix_db_dma_addr;

	u64				dram_bar_cur_addr;
	u64				hw_cap_initialized;
	u64				active_hw_arc;
	u64				dec_hw_cap_initialized;
	u64				tpc_hw_cap_initialized;
	u64				active_tpc_arc;
	u64				nic_hw_cap_initialized;
	u64				active_nic_arc;
	u32				hw_events[GAUDI2_EVENT_SIZE];
	u32				events_stat[GAUDI2_EVENT_SIZE];
	u32				events_stat_aggregate[GAUDI2_EVENT_SIZE];
	u32				num_of_valid_hw_events;

	/* Queue testing */
	struct gaudi2_queues_test_info	queues_test_info[GAUDI2_NUM_TESTED_QS];
};
564 
/*
 * Types of the Gaudi2 IP blocks, used by special blocks iterator.
 * Required for scenarios where only particular block types can be
 * addressed (e.g., special PLDM images).
 */
enum gaudi2_block_types {
	GAUDI2_BLOCK_TYPE_PLL,
	GAUDI2_BLOCK_TYPE_RTR,
	GAUDI2_BLOCK_TYPE_CPU,
	GAUDI2_BLOCK_TYPE_HIF,
	GAUDI2_BLOCK_TYPE_HBM,
	GAUDI2_BLOCK_TYPE_NIC,
	GAUDI2_BLOCK_TYPE_PCIE,
	GAUDI2_BLOCK_TYPE_PCIE_PMA,
	GAUDI2_BLOCK_TYPE_PDMA,
	GAUDI2_BLOCK_TYPE_EDMA,
	GAUDI2_BLOCK_TYPE_PMMU,
	GAUDI2_BLOCK_TYPE_PSOC,
	GAUDI2_BLOCK_TYPE_ROT,
	GAUDI2_BLOCK_TYPE_ARC_FARM,
	GAUDI2_BLOCK_TYPE_DEC,
	GAUDI2_BLOCK_TYPE_MME,
	GAUDI2_BLOCK_TYPE_EU_BIST,
	GAUDI2_BLOCK_TYPE_SYNC_MNGR,
	GAUDI2_BLOCK_TYPE_STLB,
	GAUDI2_BLOCK_TYPE_TPC,
	GAUDI2_BLOCK_TYPE_HMMU,
	GAUDI2_BLOCK_TYPE_SRAM,
	GAUDI2_BLOCK_TYPE_XBAR,
	GAUDI2_BLOCK_TYPE_KDMA,
	GAUDI2_BLOCK_TYPE_XDMA,
	GAUDI2_BLOCK_TYPE_XFT,
	GAUDI2_BLOCK_TYPE_MAX
};
599 
/* Register block base-address tables, indexed by the enums above */
extern const u32 gaudi2_dma_core_blocks_bases[DMA_CORE_ID_SIZE];
extern const u32 gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_SIZE];
extern const u32 gaudi2_mme_acc_blocks_bases[MME_ID_SIZE];
extern const u32 gaudi2_mme_ctrl_lo_blocks_bases[MME_ID_SIZE];
extern const u32 edma_stream_base[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES];
extern const u32 gaudi2_rot_blocks_bases[ROTATOR_ID_SIZE];

/* ASIC-specific helpers shared between the gaudi2 compilation units */
void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx);
int gaudi2_coresight_init(struct hl_device *hdev);
int gaudi2_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
void gaudi2_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx);
void gaudi2_init_blocks(struct hl_device *hdev, struct dup_block_ctx *cfg_ctx);
bool gaudi2_is_hmmu_enabled(struct hl_device *hdev, int dcore_id, int hmmu_id);
void gaudi2_write_rr_to_all_lbw_rtrs(struct hl_device *hdev, u8 rr_type, u32 rr_index, u64 min_val,
					u64 max_val);
void gaudi2_pb_print_security_errors(struct hl_device *hdev, u32 block_addr, u32 cause,
					u32 offended_addr);
int gaudi2_init_security(struct hl_device *hdev);
void gaudi2_ack_protection_bits_errors(struct hl_device *hdev);
int gaudi2_send_device_activity(struct hl_device *hdev, bool open);
620 
621 #endif /* GAUDI2P_H_ */
622