xref: /linux/drivers/accel/habanalabs/goya/goya.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2016-2022 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #include "goyaP.h"
9 #include "../include/hw_ip/mmu/mmu_general.h"
10 #include "../include/hw_ip/mmu/mmu_v1_0.h"
11 #include "../include/goya/asic_reg/goya_masks.h"
12 #include "../include/goya/goya_reg_map.h"
13 
14 #include <linux/pci.h>
15 #include <linux/hwmon.h>
16 #include <linux/iommu.h>
17 #include <linux/seq_file.h>
18 
19 /*
20  * GOYA security scheme:
21  *
22  * 1. Host is protected by:
23  *        - Range registers (When MMU is enabled, DMA RR does NOT protect host)
24  *        - MMU
25  *
26  * 2. DRAM is protected by:
27  *        - Range registers (protect the first 512MB)
28  *        - MMU (isolation between users)
29  *
30  * 3. Configuration is protected by:
31  *        - Range registers
32  *        - Protection bits
33  *
34  * When MMU is disabled:
35  *
36  * QMAN DMA: PQ, CQ, CP, DMA are secured.
37  * PQ, CB and the data are on the host.
38  *
39  * QMAN TPC/MME:
40  * PQ, CQ and CP are not secured.
41  * PQ, CB and the data are on the SRAM/DRAM.
42  *
 43  * Since QMAN DMA is secured, the driver parses the DMA CB:
 44  *     - DMA pointers are checked.
45  *     - WREG, MSG_PROT are not allowed.
46  *     - MSG_LONG/SHORT are allowed.
47  *
48  * A read/write transaction by the QMAN to a protected area will succeed if
 49  * and only if the QMAN's CP is secured and MSG_PROT is used.
50  *
51  *
52  * When MMU is enabled:
53  *
54  * QMAN DMA: PQ, CQ and CP are secured.
55  * MMU is set to bypass on the Secure props register of the QMAN.
56  * The reasons we don't enable MMU for PQ, CQ and CP are:
57  *     - PQ entry is in kernel address space and the driver doesn't map it.
58  *     - CP writes to MSIX register and to kernel address space (completion
59  *       queue).
60  *
 61  * DMA is not secured, but because CP is secured, the driver still needs to
 62  * parse the CB; it doesn't need to check the DMA addresses, though.
63  *
64  * For QMAN DMA 0, DMA is also secured because only the driver uses this DMA and
65  * the driver doesn't map memory in MMU.
66  *
67  * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
68  *
69  * DMA RR does NOT protect host because DMA is not secured
70  *
71  */
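/*
 * Editorial sketch (not part of the driver): the parsing policy described
 * above reduces to a per-opcode allow/deny decision for user command
 * buffers going through the secured DMA QMAN. The helper name below is
 * hypothetical; the driver's real parser also rejects CP_DMA and STOP for
 * user submissions and range-checks LIN_DMA pointers.
 */
static bool goya_sketch_pkt_allowed_on_secured_dma(enum packet_id id)
{
	switch (id) {
	case PACKET_WREG_32:		/* register writes are privileged */
	case PACKET_WREG_BULK:
	case PACKET_MSG_PROT:		/* only a secured CP may emit this */
	case PACKET_CP_DMA:		/* kernel-only packets */
	case PACKET_STOP:
		return false;
	case PACKET_MSG_LONG:		/* plain memory writes are allowed */
	case PACKET_MSG_SHORT:
	case PACKET_LIN_DMA:		/* allowed, pointers checked later */
	case PACKET_FENCE:
	case PACKET_NOP:
		return true;
	default:
		return false;		/* unknown opcodes are rejected */
	}
}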
72 
73 #define GOYA_BOOT_FIT_FILE	"habanalabs/goya/goya-boot-fit.itb"
74 #define GOYA_LINUX_FW_FILE	"habanalabs/goya/goya-fit.itb"
75 
76 #define GOYA_MMU_REGS_NUM		63
77 
78 #define GOYA_DMA_POOL_BLK_SIZE		0x100		/* 256 bytes */
79 
80 #define GOYA_RESET_TIMEOUT_MSEC		500		/* 500ms */
81 #define GOYA_PLDM_RESET_TIMEOUT_MSEC	20000		/* 20s */
82 #define GOYA_RESET_WAIT_MSEC		1		/* 1ms */
83 #define GOYA_CPU_RESET_WAIT_MSEC	100		/* 100ms */
84 #define GOYA_PLDM_RESET_WAIT_MSEC	1000		/* 1s */
85 #define GOYA_TEST_QUEUE_WAIT_USEC	100000		/* 100ms */
86 #define GOYA_PLDM_MMU_TIMEOUT_USEC	(MMU_CONFIG_TIMEOUT_USEC * 100)
87 #define GOYA_PLDM_QMAN0_TIMEOUT_USEC	(HL_DEVICE_TIMEOUT_USEC * 30)
88 #define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC	1000000		/* 1s */
89 #define GOYA_MSG_TO_CPU_TIMEOUT_USEC	4000000		/* 4s */
90 #define GOYA_WAIT_FOR_BL_TIMEOUT_USEC	15000000	/* 15s */
91 
92 #define GOYA_QMAN0_FENCE_VAL		0xD169B243
93 
94 #define GOYA_MAX_STRING_LEN		20
95 
96 #define GOYA_CB_POOL_CB_CNT		512
97 #define GOYA_CB_POOL_CB_SIZE		0x20000		/* 128KB */
98 
99 #define IS_QM_IDLE(engine, qm_glbl_sts0) \
100 	(((qm_glbl_sts0) & engine##_QM_IDLE_MASK) == engine##_QM_IDLE_MASK)
101 #define IS_DMA_QM_IDLE(qm_glbl_sts0)	IS_QM_IDLE(DMA, qm_glbl_sts0)
102 #define IS_TPC_QM_IDLE(qm_glbl_sts0)	IS_QM_IDLE(TPC, qm_glbl_sts0)
103 #define IS_MME_QM_IDLE(qm_glbl_sts0)	IS_QM_IDLE(MME, qm_glbl_sts0)
104 
105 #define IS_CMDQ_IDLE(engine, cmdq_glbl_sts0) \
106 	(((cmdq_glbl_sts0) & engine##_CMDQ_IDLE_MASK) == \
107 			engine##_CMDQ_IDLE_MASK)
108 #define IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) \
109 	IS_CMDQ_IDLE(TPC, cmdq_glbl_sts0)
110 #define IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) \
111 	IS_CMDQ_IDLE(MME, cmdq_glbl_sts0)
112 
113 #define IS_DMA_IDLE(dma_core_sts0) \
114 	!((dma_core_sts0) & DMA_CH_0_STS0_DMA_BUSY_MASK)
115 
116 #define IS_TPC_IDLE(tpc_cfg_sts) \
117 	(((tpc_cfg_sts) & TPC_CFG_IDLE_MASK) == TPC_CFG_IDLE_MASK)
118 
119 #define IS_MME_IDLE(mme_arch_sts) \
120 	(((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
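/*
 * Usage sketch (editorial): the idle macros above fold per-engine status
 * registers into a single predicate. For one DMA channel this is roughly
 * as follows; the helper name is hypothetical and the register-stride
 * subtraction pattern mirrors the one used elsewhere in this file.
 */
static bool goya_sketch_dma_idle(struct hl_device *hdev, int dma_id)
{
	u32 qm_off = dma_id * (mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0);
	u32 ch_off = dma_id * (mmDMA_CH_1_STS0 - mmDMA_CH_0_STS0);

	return IS_DMA_QM_IDLE(RREG32(mmDMA_QM_0_GLBL_STS0 + qm_off)) &&
		IS_DMA_IDLE(RREG32(mmDMA_CH_0_STS0 + ch_off));
}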
121 
122 static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
123 		"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
124 		"goya cq 4", "goya cpu eq"
125 };
126 
127 static u16 goya_packet_sizes[MAX_PACKET_ID] = {
128 	[PACKET_WREG_32]	= sizeof(struct packet_wreg32),
129 	[PACKET_WREG_BULK]	= sizeof(struct packet_wreg_bulk),
130 	[PACKET_MSG_LONG]	= sizeof(struct packet_msg_long),
131 	[PACKET_MSG_SHORT]	= sizeof(struct packet_msg_short),
132 	[PACKET_CP_DMA]		= sizeof(struct packet_cp_dma),
133 	[PACKET_MSG_PROT]	= sizeof(struct packet_msg_prot),
134 	[PACKET_FENCE]		= sizeof(struct packet_fence),
135 	[PACKET_LIN_DMA]	= sizeof(struct packet_lin_dma),
136 	[PACKET_NOP]		= sizeof(struct packet_nop),
137 	[PACKET_STOP]		= sizeof(struct packet_stop)
138 };
139 
140 static inline bool validate_packet_id(enum packet_id id)
141 {
142 	switch (id) {
143 	case PACKET_WREG_32:
144 	case PACKET_WREG_BULK:
145 	case PACKET_MSG_LONG:
146 	case PACKET_MSG_SHORT:
147 	case PACKET_CP_DMA:
148 	case PACKET_MSG_PROT:
149 	case PACKET_FENCE:
150 	case PACKET_LIN_DMA:
151 	case PACKET_NOP:
152 	case PACKET_STOP:
153 		return true;
154 	default:
155 		return false;
156 	}
157 }
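/*
 * Usage sketch (editorial): validate_packet_id() and goya_packet_sizes[]
 * are enough to walk a command buffer packet by packet. This assumes the
 * 64-bit packet header and the PACKET_HEADER_PACKET_ID_SHIFT/_MASK
 * definitions from goyaP.h; the real parser additionally applies
 * per-opcode security checks and patches LIN_DMA packets.
 */
static int goya_sketch_walk_cb(void *cb, u32 cb_size)
{
	u32 parsed = 0;

	while (parsed < cb_size) {
		struct goya_packet *pkt = cb + parsed;
		enum packet_id id;
		u16 pkt_size;

		id = (enum packet_id)((le64_to_cpu(pkt->header) &
				PACKET_HEADER_PACKET_ID_MASK) >>
				PACKET_HEADER_PACKET_ID_SHIFT);

		if (!validate_packet_id(id))
			return -EINVAL;		/* unknown opcode */

		pkt_size = goya_packet_sizes[id];
		if (!pkt_size || parsed + pkt_size > cb_size)
			return -EINVAL;		/* truncated packet */

		parsed += pkt_size;		/* advance to next packet */
	}

	return 0;
}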
158 
159 static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
160 	mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
161 	mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
162 	mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
163 	mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
164 	mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
165 	mmTPC0_QM_GLBL_SECURE_PROPS,
166 	mmTPC0_QM_GLBL_NON_SECURE_PROPS,
167 	mmTPC0_CMDQ_GLBL_SECURE_PROPS,
168 	mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
169 	mmTPC0_CFG_ARUSER,
170 	mmTPC0_CFG_AWUSER,
171 	mmTPC1_QM_GLBL_SECURE_PROPS,
172 	mmTPC1_QM_GLBL_NON_SECURE_PROPS,
173 	mmTPC1_CMDQ_GLBL_SECURE_PROPS,
174 	mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
175 	mmTPC1_CFG_ARUSER,
176 	mmTPC1_CFG_AWUSER,
177 	mmTPC2_QM_GLBL_SECURE_PROPS,
178 	mmTPC2_QM_GLBL_NON_SECURE_PROPS,
179 	mmTPC2_CMDQ_GLBL_SECURE_PROPS,
180 	mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
181 	mmTPC2_CFG_ARUSER,
182 	mmTPC2_CFG_AWUSER,
183 	mmTPC3_QM_GLBL_SECURE_PROPS,
184 	mmTPC3_QM_GLBL_NON_SECURE_PROPS,
185 	mmTPC3_CMDQ_GLBL_SECURE_PROPS,
186 	mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
187 	mmTPC3_CFG_ARUSER,
188 	mmTPC3_CFG_AWUSER,
189 	mmTPC4_QM_GLBL_SECURE_PROPS,
190 	mmTPC4_QM_GLBL_NON_SECURE_PROPS,
191 	mmTPC4_CMDQ_GLBL_SECURE_PROPS,
192 	mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
193 	mmTPC4_CFG_ARUSER,
194 	mmTPC4_CFG_AWUSER,
195 	mmTPC5_QM_GLBL_SECURE_PROPS,
196 	mmTPC5_QM_GLBL_NON_SECURE_PROPS,
197 	mmTPC5_CMDQ_GLBL_SECURE_PROPS,
198 	mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
199 	mmTPC5_CFG_ARUSER,
200 	mmTPC5_CFG_AWUSER,
201 	mmTPC6_QM_GLBL_SECURE_PROPS,
202 	mmTPC6_QM_GLBL_NON_SECURE_PROPS,
203 	mmTPC6_CMDQ_GLBL_SECURE_PROPS,
204 	mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
205 	mmTPC6_CFG_ARUSER,
206 	mmTPC6_CFG_AWUSER,
207 	mmTPC7_QM_GLBL_SECURE_PROPS,
208 	mmTPC7_QM_GLBL_NON_SECURE_PROPS,
209 	mmTPC7_CMDQ_GLBL_SECURE_PROPS,
210 	mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
211 	mmTPC7_CFG_ARUSER,
212 	mmTPC7_CFG_AWUSER,
213 	mmMME_QM_GLBL_SECURE_PROPS,
214 	mmMME_QM_GLBL_NON_SECURE_PROPS,
215 	mmMME_CMDQ_GLBL_SECURE_PROPS,
216 	mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
217 	mmMME_SBA_CONTROL_DATA,
218 	mmMME_SBB_CONTROL_DATA,
219 	mmMME_SBC_CONTROL_DATA,
220 	mmMME_WBC_CONTROL_DATA,
221 	mmPCIE_WRAP_PSOC_ARUSER,
222 	mmPCIE_WRAP_PSOC_AWUSER
223 };
224 
225 static u32 goya_all_events[] = {
226 	GOYA_ASYNC_EVENT_ID_PCIE_IF,
227 	GOYA_ASYNC_EVENT_ID_TPC0_ECC,
228 	GOYA_ASYNC_EVENT_ID_TPC1_ECC,
229 	GOYA_ASYNC_EVENT_ID_TPC2_ECC,
230 	GOYA_ASYNC_EVENT_ID_TPC3_ECC,
231 	GOYA_ASYNC_EVENT_ID_TPC4_ECC,
232 	GOYA_ASYNC_EVENT_ID_TPC5_ECC,
233 	GOYA_ASYNC_EVENT_ID_TPC6_ECC,
234 	GOYA_ASYNC_EVENT_ID_TPC7_ECC,
235 	GOYA_ASYNC_EVENT_ID_MME_ECC,
236 	GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
237 	GOYA_ASYNC_EVENT_ID_MMU_ECC,
238 	GOYA_ASYNC_EVENT_ID_DMA_MACRO,
239 	GOYA_ASYNC_EVENT_ID_DMA_ECC,
240 	GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
241 	GOYA_ASYNC_EVENT_ID_PSOC_MEM,
242 	GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
243 	GOYA_ASYNC_EVENT_ID_SRAM0,
244 	GOYA_ASYNC_EVENT_ID_SRAM1,
245 	GOYA_ASYNC_EVENT_ID_SRAM2,
246 	GOYA_ASYNC_EVENT_ID_SRAM3,
247 	GOYA_ASYNC_EVENT_ID_SRAM4,
248 	GOYA_ASYNC_EVENT_ID_SRAM5,
249 	GOYA_ASYNC_EVENT_ID_SRAM6,
250 	GOYA_ASYNC_EVENT_ID_SRAM7,
251 	GOYA_ASYNC_EVENT_ID_SRAM8,
252 	GOYA_ASYNC_EVENT_ID_SRAM9,
253 	GOYA_ASYNC_EVENT_ID_SRAM10,
254 	GOYA_ASYNC_EVENT_ID_SRAM11,
255 	GOYA_ASYNC_EVENT_ID_SRAM12,
256 	GOYA_ASYNC_EVENT_ID_SRAM13,
257 	GOYA_ASYNC_EVENT_ID_SRAM14,
258 	GOYA_ASYNC_EVENT_ID_SRAM15,
259 	GOYA_ASYNC_EVENT_ID_SRAM16,
260 	GOYA_ASYNC_EVENT_ID_SRAM17,
261 	GOYA_ASYNC_EVENT_ID_SRAM18,
262 	GOYA_ASYNC_EVENT_ID_SRAM19,
263 	GOYA_ASYNC_EVENT_ID_SRAM20,
264 	GOYA_ASYNC_EVENT_ID_SRAM21,
265 	GOYA_ASYNC_EVENT_ID_SRAM22,
266 	GOYA_ASYNC_EVENT_ID_SRAM23,
267 	GOYA_ASYNC_EVENT_ID_SRAM24,
268 	GOYA_ASYNC_EVENT_ID_SRAM25,
269 	GOYA_ASYNC_EVENT_ID_SRAM26,
270 	GOYA_ASYNC_EVENT_ID_SRAM27,
271 	GOYA_ASYNC_EVENT_ID_SRAM28,
272 	GOYA_ASYNC_EVENT_ID_SRAM29,
273 	GOYA_ASYNC_EVENT_ID_GIC500,
274 	GOYA_ASYNC_EVENT_ID_PLL0,
275 	GOYA_ASYNC_EVENT_ID_PLL1,
276 	GOYA_ASYNC_EVENT_ID_PLL3,
277 	GOYA_ASYNC_EVENT_ID_PLL4,
278 	GOYA_ASYNC_EVENT_ID_PLL5,
279 	GOYA_ASYNC_EVENT_ID_PLL6,
280 	GOYA_ASYNC_EVENT_ID_AXI_ECC,
281 	GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
282 	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
283 	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
284 	GOYA_ASYNC_EVENT_ID_PCIE_DEC,
285 	GOYA_ASYNC_EVENT_ID_TPC0_DEC,
286 	GOYA_ASYNC_EVENT_ID_TPC1_DEC,
287 	GOYA_ASYNC_EVENT_ID_TPC2_DEC,
288 	GOYA_ASYNC_EVENT_ID_TPC3_DEC,
289 	GOYA_ASYNC_EVENT_ID_TPC4_DEC,
290 	GOYA_ASYNC_EVENT_ID_TPC5_DEC,
291 	GOYA_ASYNC_EVENT_ID_TPC6_DEC,
292 	GOYA_ASYNC_EVENT_ID_TPC7_DEC,
293 	GOYA_ASYNC_EVENT_ID_MME_WACS,
294 	GOYA_ASYNC_EVENT_ID_MME_WACSD,
295 	GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
296 	GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
297 	GOYA_ASYNC_EVENT_ID_PSOC,
298 	GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
299 	GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
300 	GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
301 	GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
302 	GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
303 	GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
304 	GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
305 	GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
306 	GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
307 	GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
308 	GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
309 	GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
310 	GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
311 	GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
312 	GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
313 	GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
314 	GOYA_ASYNC_EVENT_ID_TPC0_QM,
315 	GOYA_ASYNC_EVENT_ID_TPC1_QM,
316 	GOYA_ASYNC_EVENT_ID_TPC2_QM,
317 	GOYA_ASYNC_EVENT_ID_TPC3_QM,
318 	GOYA_ASYNC_EVENT_ID_TPC4_QM,
319 	GOYA_ASYNC_EVENT_ID_TPC5_QM,
320 	GOYA_ASYNC_EVENT_ID_TPC6_QM,
321 	GOYA_ASYNC_EVENT_ID_TPC7_QM,
322 	GOYA_ASYNC_EVENT_ID_MME_QM,
323 	GOYA_ASYNC_EVENT_ID_MME_CMDQ,
324 	GOYA_ASYNC_EVENT_ID_DMA0_QM,
325 	GOYA_ASYNC_EVENT_ID_DMA1_QM,
326 	GOYA_ASYNC_EVENT_ID_DMA2_QM,
327 	GOYA_ASYNC_EVENT_ID_DMA3_QM,
328 	GOYA_ASYNC_EVENT_ID_DMA4_QM,
329 	GOYA_ASYNC_EVENT_ID_DMA0_CH,
330 	GOYA_ASYNC_EVENT_ID_DMA1_CH,
331 	GOYA_ASYNC_EVENT_ID_DMA2_CH,
332 	GOYA_ASYNC_EVENT_ID_DMA3_CH,
333 	GOYA_ASYNC_EVENT_ID_DMA4_CH,
334 	GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
335 	GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
336 	GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
337 	GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
338 	GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
339 	GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
340 	GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
341 	GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
342 	GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
343 	GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
344 	GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
345 	GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
346 	GOYA_ASYNC_EVENT_ID_DMA_BM_CH4,
347 	GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S,
348 	GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E,
349 	GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S,
350 	GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E
351 };
352 
353 static s64 goya_state_dump_specs_props[SP_MAX] = {0};
354 
355 static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
356 static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
357 static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
358 static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
359 
360 int goya_set_fixed_properties(struct hl_device *hdev)
361 {
362 	struct asic_fixed_properties *prop = &hdev->asic_prop;
363 	int i;
364 
365 	prop->max_queues = GOYA_QUEUE_ID_SIZE;
366 	prop->hw_queues_props = kcalloc(prop->max_queues,
367 			sizeof(struct hw_queue_properties), GFP_KERNEL);
368 
369 	if (!prop->hw_queues_props)
370 		return -ENOMEM;
371 
372 	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
373 		prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
374 		prop->hw_queues_props[i].driver_only = 0;
375 		prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
376 	}
377 
378 	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
379 		prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
380 		prop->hw_queues_props[i].driver_only = 1;
381 		prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
382 	}
383 
384 	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
385 			NUMBER_OF_INT_HW_QUEUES; i++) {
386 		prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
387 		prop->hw_queues_props[i].driver_only = 0;
388 		prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER;
389 	}
390 
391 	prop->cfg_base_address = CFG_BASE;
392 	prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
393 	prop->host_base_address = HOST_PHYS_BASE;
394 	prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
395 	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
396 	prop->completion_mode = HL_COMPLETION_MODE_JOB;
397 	prop->dram_base_address = DRAM_PHYS_BASE;
398 	prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
399 	prop->dram_end_address = prop->dram_base_address + prop->dram_size;
400 	prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
401 
402 	prop->sram_base_address = SRAM_BASE_ADDR;
403 	prop->sram_size = SRAM_SIZE;
404 	prop->sram_end_address = prop->sram_base_address + prop->sram_size;
405 	prop->sram_user_base_address = prop->sram_base_address +
406 						SRAM_USER_BASE_OFFSET;
407 
408 	prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
409 	prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
410 	if (hdev->pldm)
411 		prop->mmu_pgt_size = 0x800000; /* 8MB */
412 	else
413 		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
414 	prop->mmu_pte_size = HL_PTE_SIZE;
415 	prop->dram_page_size = PAGE_SIZE_2MB;
416 	prop->device_mem_alloc_default_page_size = prop->dram_page_size;
417 	prop->dram_supports_virtual_memory = true;
418 
419 	prop->dmmu.hop_shifts[MMU_HOP0] = MMU_V1_0_HOP0_SHIFT;
420 	prop->dmmu.hop_shifts[MMU_HOP1] = MMU_V1_0_HOP1_SHIFT;
421 	prop->dmmu.hop_shifts[MMU_HOP2] = MMU_V1_0_HOP2_SHIFT;
422 	prop->dmmu.hop_shifts[MMU_HOP3] = MMU_V1_0_HOP3_SHIFT;
423 	prop->dmmu.hop_shifts[MMU_HOP4] = MMU_V1_0_HOP4_SHIFT;
424 	prop->dmmu.hop_masks[MMU_HOP0] = MMU_V1_0_HOP0_MASK;
425 	prop->dmmu.hop_masks[MMU_HOP1] = MMU_V1_0_HOP1_MASK;
426 	prop->dmmu.hop_masks[MMU_HOP2] = MMU_V1_0_HOP2_MASK;
427 	prop->dmmu.hop_masks[MMU_HOP3] = MMU_V1_0_HOP3_MASK;
428 	prop->dmmu.hop_masks[MMU_HOP4] = MMU_V1_0_HOP4_MASK;
429 	prop->dmmu.start_addr = VA_DDR_SPACE_START;
430 	prop->dmmu.end_addr = VA_DDR_SPACE_END;
431 	prop->dmmu.page_size = PAGE_SIZE_2MB;
432 	prop->dmmu.num_hops = MMU_ARCH_5_HOPS;
433 	prop->dmmu.last_mask = LAST_MASK;
434 	/* TODO: will be duplicated until implementing per-MMU props */
435 	prop->dmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
436 	prop->dmmu.hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
437 
438 	/* shifts and masks are the same in PMMU and DMMU */
439 	memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
440 	prop->pmmu.start_addr = VA_HOST_SPACE_START;
441 	prop->pmmu.end_addr = VA_HOST_SPACE_END;
442 	prop->pmmu.page_size = PAGE_SIZE_4KB;
443 	prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
444 	prop->pmmu.last_mask = LAST_MASK;
445 	/* TODO: will be duplicated until implementing per-MMU props */
446 	prop->pmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE;
447 	prop->pmmu.hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
448 
449 	/* PMMU and HPMMU are the same except for the page size */
450 	memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
451 	prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
452 
453 	prop->dram_size_for_default_page_mapping = VA_DDR_SPACE_END;
454 	prop->cfg_size = CFG_SIZE;
455 	prop->max_asid = MAX_ASID;
456 	prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
457 	prop->high_pll = PLL_HIGH_DEFAULT;
458 	prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
459 	prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
460 	prop->max_power_default = MAX_POWER_DEFAULT;
461 	prop->dc_power_default = DC_POWER_DEFAULT;
462 	prop->tpc_enabled_mask = TPC_ENABLED_MASK;
463 	prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
464 	prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
465 
466 	strscpy_pad(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
467 		CARD_NAME_MAX_LEN);
468 
469 	prop->max_pending_cs = GOYA_MAX_PENDING_CS;
470 
471 	prop->first_available_user_interrupt = USHRT_MAX;
472 	prop->tpc_interrupt_id = USHRT_MAX;
473 	prop->eq_interrupt_id = GOYA_EVENT_QUEUE_MSIX_IDX;
474 
475 	for (i = 0 ; i < HL_MAX_DCORES ; i++)
476 		prop->first_available_cq[i] = USHRT_MAX;
477 
478 	prop->fw_cpu_boot_dev_sts0_valid = false;
479 	prop->fw_cpu_boot_dev_sts1_valid = false;
480 	prop->hard_reset_done_by_fw = false;
481 	prop->gic_interrupts_enable = true;
482 
483 	prop->server_type = HL_SERVER_TYPE_UNKNOWN;
484 
485 	prop->clk_pll_index = HL_GOYA_MME_PLL;
486 
487 	prop->use_get_power_for_reset_history = true;
488 
489 	prop->configurable_stop_on_err = true;
490 
491 	prop->set_max_power_on_device_init = true;
492 
493 	prop->dma_mask = 48;
494 
495 	return 0;
496 }
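/*
 * Sketch (editorial): with the hop shifts/masks configured above, the MMU
 * resolves a virtual address in MMU_ARCH_5_HOPS steps, each hop indexing
 * a 512-PTE table (see HOP_TABLE_SIZE_512_PTE above). Extracting a hop's
 * PTE index is mask-then-shift over the values just initialized; the
 * helper name is hypothetical.
 */
static u64 goya_sketch_hop_pte_idx(struct hl_mmu_properties *mmu_prop,
					int hop, u64 virt_addr)
{
	return (virt_addr & mmu_prop->hop_masks[hop]) >>
			mmu_prop->hop_shifts[hop];
}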
497 
498 /*
499  * goya_pci_bars_map - Map PCI BARS of Goya device
500  *
501  * @hdev: pointer to hl_device structure
502  *
503  * Request PCI regions and map them to kernel virtual addresses.
504  * Returns 0 on success
505  *
506  */
507 static int goya_pci_bars_map(struct hl_device *hdev)
508 {
509 	static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
510 	bool is_wc[3] = {false, false, true};
511 	int rc;
512 
513 	rc = hl_pci_bars_map(hdev, name, is_wc);
514 	if (rc)
515 		return rc;
516 
517 	hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
518 			(CFG_BASE - SRAM_BASE_ADDR);
519 
520 	return 0;
521 }
522 
523 static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
524 {
525 	struct goya_device *goya = hdev->asic_specific;
526 	struct hl_inbound_pci_region pci_region;
527 	u64 old_addr = addr;
528 	int rc;
529 
530 	if ((goya) && (goya->ddr_bar_cur_addr == addr))
531 		return old_addr;
532 
533 	/* Inbound Region 1 - Bar 4 - Point to DDR */
534 	pci_region.mode = PCI_BAR_MATCH_MODE;
535 	pci_region.bar = DDR_BAR_ID;
536 	pci_region.addr = addr;
537 	rc = hl_pci_set_inbound_region(hdev, 1, &pci_region);
538 	if (rc)
539 		return U64_MAX;
540 
541 	if (goya) {
542 		old_addr = goya->ddr_bar_cur_addr;
543 		goya->ddr_bar_cur_addr = addr;
544 	}
545 
546 	return old_addr;
547 }
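/*
 * Usage sketch (editorial): goya_set_ddr_bar_base() re-points the DDR BAR
 * window, which is typically much smaller than DRAM. A caller that wants
 * to read an arbitrary DRAM physical address aligns the window to the BAR
 * size and reads the remainder as an offset into the mapped BAR, roughly
 * as below (hypothetical helper; BAR sizes are powers of two, so
 * round_down() is exact):
 */
static u32 goya_sketch_read_dram(struct hl_device *hdev, u64 dram_addr)
{
	u64 bar_size = hdev->asic_prop.dram_pci_bar_size;
	u64 bar_base = round_down(dram_addr, bar_size);
	u64 old_base;
	u32 val;

	old_base = goya_set_ddr_bar_base(hdev, bar_base);
	if (old_base == U64_MAX)
		return 0;	/* moving the window failed */

	val = readl(hdev->pcie_bar[DDR_BAR_ID] + (dram_addr - bar_base));

	/* restore the previous window for other users */
	goya_set_ddr_bar_base(hdev, old_base);

	return val;
}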
548 
549 /*
550  * goya_init_iatu - Initialize the iATU unit inside the PCI controller
551  *
552  * @hdev: pointer to hl_device structure
553  *
554  * This is needed in case the firmware doesn't initialize the iATU
555  *
556  */
557 static int goya_init_iatu(struct hl_device *hdev)
558 {
559 	struct hl_inbound_pci_region inbound_region;
560 	struct hl_outbound_pci_region outbound_region;
561 	int rc;
562 
563 	if (hdev->asic_prop.iatu_done_by_fw)
564 		return 0;
565 
566 	/* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */
567 	inbound_region.mode = PCI_BAR_MATCH_MODE;
568 	inbound_region.bar = SRAM_CFG_BAR_ID;
569 	inbound_region.addr = SRAM_BASE_ADDR;
570 	rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
571 	if (rc)
572 		goto done;
573 
574 	/* Inbound Region 1 - Bar 4 - Point to DDR */
575 	inbound_region.mode = PCI_BAR_MATCH_MODE;
576 	inbound_region.bar = DDR_BAR_ID;
577 	inbound_region.addr = DRAM_PHYS_BASE;
578 	rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
579 	if (rc)
580 		goto done;
581 
582 	/* Outbound Region 0 - Point to Host  */
583 	outbound_region.addr = HOST_PHYS_BASE;
584 	outbound_region.size = HOST_PHYS_SIZE;
585 	rc = hl_pci_set_outbound_region(hdev, &outbound_region);
586 
587 done:
588 	return rc;
589 }
590 
591 static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
592 {
593 	return RREG32(mmHW_STATE);
594 }
595 
596 /*
597  * goya_early_init - GOYA early initialization code
598  *
599  * @hdev: pointer to hl_device structure
600  *
601  * Verify PCI bars
602  * Set DMA masks
603  * PCI controller initialization
604  * Map PCI bars
605  *
606  */
607 static int goya_early_init(struct hl_device *hdev)
608 {
609 	struct asic_fixed_properties *prop = &hdev->asic_prop;
610 	struct pci_dev *pdev = hdev->pdev;
611 	resource_size_t pci_bar_size;
612 	u32 fw_boot_status, val;
613 	int rc;
614 
615 	rc = goya_set_fixed_properties(hdev);
616 	if (rc) {
617 		dev_err(hdev->dev, "Failed to get fixed properties\n");
618 		return rc;
619 	}
620 
621 	/* Check BAR sizes */
622 	pci_bar_size = pci_resource_len(pdev, SRAM_CFG_BAR_ID);
623 
624 	if (pci_bar_size != CFG_BAR_SIZE) {
625 		dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
626 			SRAM_CFG_BAR_ID, &pci_bar_size, CFG_BAR_SIZE);
627 		rc = -ENODEV;
628 		goto free_queue_props;
629 	}
630 
631 	pci_bar_size = pci_resource_len(pdev, MSIX_BAR_ID);
632 
633 	if (pci_bar_size != MSIX_BAR_SIZE) {
634 		dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
635 			MSIX_BAR_ID, &pci_bar_size, MSIX_BAR_SIZE);
636 		rc = -ENODEV;
637 		goto free_queue_props;
638 	}
639 
640 	prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
641 	hdev->dram_pci_bar_start = pci_resource_start(pdev, DDR_BAR_ID);
642 
643 	/* If FW security is enabled at this point it means no access to ELBI */
644 	if (hdev->asic_prop.fw_security_enabled) {
645 		hdev->asic_prop.iatu_done_by_fw = true;
646 		goto pci_init;
647 	}
648 
649 	rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0,
650 				&fw_boot_status);
651 	if (rc)
652 		goto free_queue_props;
653 
654 	/* Check whether FW is configuring iATU */
655 	if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
656 			(fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
657 		hdev->asic_prop.iatu_done_by_fw = true;
658 
659 pci_init:
660 	rc = hl_pci_init(hdev);
661 	if (rc)
662 		goto free_queue_props;
663 
664 	/* Before continuing with the initialization, we need to read the
665 	 * preboot version to determine whether the firmware is security-enabled
666 	 */
667 	rc = hl_fw_read_preboot_status(hdev);
668 	if (rc) {
669 		if (hdev->reset_on_preboot_fail)
670 			/* we are already on failure flow, so don't check if hw_fini fails. */
671 			hdev->asic_funcs->hw_fini(hdev, true, false);
672 		goto pci_fini;
673 	}
674 
675 	if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
676 		dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
677 		rc = hdev->asic_funcs->hw_fini(hdev, true, false);
678 		if (rc) {
679 			dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
680 			goto pci_fini;
681 		}
682 	}
683 
684 	if (!hdev->pldm) {
685 		val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
686 		if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
687 			dev_warn(hdev->dev,
688 				"PCI strap is not configured correctly, PCI bus errors may occur\n");
689 	}
690 
691 	return 0;
692 
693 pci_fini:
694 	hl_pci_fini(hdev);
695 free_queue_props:
696 	kfree(hdev->asic_prop.hw_queues_props);
697 	return rc;
698 }
699 
700 /*
701  * goya_early_fini - GOYA early finalization code
702  *
703  * @hdev: pointer to hl_device structure
704  *
705  * Unmap PCI bars
706  *
707  */
708 static int goya_early_fini(struct hl_device *hdev)
709 {
710 	kfree(hdev->asic_prop.hw_queues_props);
711 	hl_pci_fini(hdev);
712 
713 	return 0;
714 }
715 
716 static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
717 {
718 	/* mask to zero the MMBP and ASID bits */
719 	WREG32_AND(reg, ~0x7FF);
720 	WREG32_OR(reg, asid);
721 }
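/*
 * Sketch (editorial): the WREG32_AND/WREG32_OR pair above is a
 * read-modify-write of the low 11 bits: presumably a 10-bit ASID field
 * (MAX_ASID is 1024) plus the MMU-bypass (MMBP) bit, hence the 0x7FF
 * mask. The bit-level equivalent on a register value:
 */
static u32 goya_sketch_asid_field(u32 reg_val, u32 asid)
{
	reg_val &= ~0x7FF;	/* clear MMBP + ASID bits */
	reg_val |= asid;	/* MMBP stays 0, so MMU is used for master */

	return reg_val;
}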
722 
723 static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
724 {
725 	struct goya_device *goya = hdev->asic_specific;
726 
727 	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
728 		return;
729 
730 	if (secure)
731 		WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
732 	else
733 		WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
734 
735 	RREG32(mmDMA_QM_0_GLBL_PROT);
736 }
737 
738 /*
739  * goya_fetch_psoc_frequency - Fetch PSOC frequency values
740  *
741  * @hdev: pointer to hl_device structure
742  *
743  */
744 static void goya_fetch_psoc_frequency(struct hl_device *hdev)
745 {
746 	struct asic_fixed_properties *prop = &hdev->asic_prop;
747 	u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
748 	u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
749 	int rc;
750 
751 	if (hdev->asic_prop.fw_security_enabled) {
752 		struct goya_device *goya = hdev->asic_specific;
753 
754 		if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
755 			return;
756 
757 		rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL,
758 				pll_freq_arr);
759 
760 		if (rc)
761 			return;
762 
763 		freq = pll_freq_arr[1];
764 	} else {
765 		div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
766 		div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
767 		nr = RREG32(mmPSOC_PCI_PLL_NR);
768 		nf = RREG32(mmPSOC_PCI_PLL_NF);
769 		od = RREG32(mmPSOC_PCI_PLL_OD);
770 
771 		if (div_sel == DIV_SEL_REF_CLK ||
772 				div_sel == DIV_SEL_DIVIDED_REF) {
773 			if (div_sel == DIV_SEL_REF_CLK)
774 				freq = PLL_REF_CLK;
775 			else
776 				freq = PLL_REF_CLK / (div_fctr + 1);
777 		} else if (div_sel == DIV_SEL_PLL_CLK ||
778 				div_sel == DIV_SEL_DIVIDED_PLL) {
779 			pll_clk = PLL_REF_CLK * (nf + 1) /
780 					((nr + 1) * (od + 1));
781 			if (div_sel == DIV_SEL_PLL_CLK)
782 				freq = pll_clk;
783 			else
784 				freq = pll_clk / (div_fctr + 1);
785 		} else {
786 			dev_warn(hdev->dev,
787 				"Received invalid div select value: %d",
788 				div_sel);
789 			freq = 0;
790 		}
791 	}
792 
793 	prop->psoc_timestamp_frequency = freq;
794 	prop->psoc_pci_pll_nr = nr;
795 	prop->psoc_pci_pll_nf = nf;
796 	prop->psoc_pci_pll_od = od;
797 	prop->psoc_pci_pll_div_factor = div_fctr;
798 }
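/*
 * Worked example (editorial sketch) of the PLL arithmetic above, assuming
 * a 50 MHz reference clock for illustration: nf = 31, nr = 0, od = 1 gives
 * pll_clk = 50 * 32 / (1 * 2) = 800 MHz, and DIV_SEL_DIVIDED_PLL with
 * div_fctr = 1 halves that to 400 MHz. As a standalone helper:
 */
static u32 goya_sketch_pll_freq(u32 ref_clk, u32 nr, u32 nf, u32 od,
				u32 div_fctr, bool divided)
{
	u32 pll_clk = ref_clk * (nf + 1) / ((nr + 1) * (od + 1));

	return divided ? pll_clk / (div_fctr + 1) : pll_clk;
}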
799 
800 /*
801  * goya_set_frequency - set the frequency of the device
802  *
803  * @hdev: pointer to habanalabs device structure
804  * @freq: the new frequency value
805  *
806  * Change the frequency if needed. This function has no protection against
807  * concurrency, therefore it is assumed that the calling function has protected
808  * itself against the case of calling this function from multiple threads with
809  * different values
810  *
811  * Returns 0 if no change was done, otherwise returns 1
812  */
813 int goya_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
814 {
815 	struct goya_device *goya = hdev->asic_specific;
816 
817 	if ((goya->pm_mng_profile == PM_MANUAL) ||
818 			(goya->curr_pll_profile == freq))
819 		return 0;
820 
821 	dev_dbg(hdev->dev, "Changing device frequency to %s\n",
822 		freq == PLL_HIGH ? "high" : "low");
823 
824 	goya_set_pll_profile(hdev, freq);
825 
826 	goya->curr_pll_profile = freq;
827 
828 	return 1;
829 }
830 
831 static void goya_set_freq_to_low_job(struct work_struct *work)
832 {
833 	struct goya_work_freq *goya_work = container_of(work,
834 						struct goya_work_freq,
835 						work_freq.work);
836 	struct hl_device *hdev = goya_work->hdev;
837 
838 	mutex_lock(&hdev->fpriv_list_lock);
839 
840 	if (!hdev->is_compute_ctx_active)
841 		goya_set_frequency(hdev, PLL_LOW);
842 
843 	mutex_unlock(&hdev->fpriv_list_lock);
844 
845 	schedule_delayed_work(&goya_work->work_freq,
846 			usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
847 }
848 
849 int goya_late_init(struct hl_device *hdev)
850 {
851 	struct asic_fixed_properties *prop = &hdev->asic_prop;
852 	struct goya_device *goya = hdev->asic_specific;
853 	int rc;
854 
855 	goya_fetch_psoc_frequency(hdev);
856 
857 	rc = goya_mmu_clear_pgt_range(hdev);
858 	if (rc) {
859 		dev_err(hdev->dev,
860 			"Failed to clear MMU page tables range %d\n", rc);
861 		return rc;
862 	}
863 
864 	rc = goya_mmu_set_dram_default_page(hdev);
865 	if (rc) {
866 		dev_err(hdev->dev, "Failed to set DRAM default page %d\n", rc);
867 		return rc;
868 	}
869 
870 	rc = goya_mmu_add_mappings_for_device_cpu(hdev);
871 	if (rc)
872 		return rc;
873 
874 	rc = goya_init_cpu_queues(hdev);
875 	if (rc)
876 		return rc;
877 
878 	rc = goya_test_cpu_queue(hdev);
879 	if (rc)
880 		return rc;
881 
882 	rc = goya_cpucp_info_get(hdev);
883 	if (rc) {
884 		dev_err(hdev->dev, "Failed to get cpucp info %d\n", rc);
885 		return rc;
886 	}
887 
888 	/* Now that the DRAM size is in the ASIC properties, use it to
889 	 * configure the DMA_IF DDR wrap protection (which is in the MMU
890 	 * block). The value written is the log2 of the DRAM size.
891 	 */
892 	WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
893 
894 	rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
895 	if (rc)
896 		return rc;
897 
898 	/* force setting to low frequency */
899 	goya->curr_pll_profile = PLL_LOW;
900 
901 	goya->pm_mng_profile = PM_AUTO;
902 
903 	goya_set_pll_profile(hdev, PLL_LOW);
904 
905 	schedule_delayed_work(&goya->goya_work->work_freq,
906 		usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
907 
908 	return 0;
909 }
910 
911 /*
912  * goya_late_fini - GOYA late tear-down code
913  *
914  * @hdev: pointer to hl_device structure
915  *
916  * Free sensors allocated structures
917  */
918 void goya_late_fini(struct hl_device *hdev)
919 {
920 	struct goya_device *goya = hdev->asic_specific;
921 
922 	cancel_delayed_work_sync(&goya->goya_work->work_freq);
923 
924 	hl_hwmon_release_resources(hdev);
925 }
926 
927 static void goya_set_pci_memory_regions(struct hl_device *hdev)
928 {
929 	struct asic_fixed_properties *prop = &hdev->asic_prop;
930 	struct pci_mem_region *region;
931 
932 	/* CFG */
933 	region = &hdev->pci_mem_region[PCI_REGION_CFG];
934 	region->region_base = CFG_BASE;
935 	region->region_size = CFG_SIZE;
936 	region->offset_in_bar = CFG_BASE - SRAM_BASE_ADDR;
937 	region->bar_size = CFG_BAR_SIZE;
938 	region->bar_id = SRAM_CFG_BAR_ID;
939 	region->used = 1;
940 
941 	/* SRAM */
942 	region = &hdev->pci_mem_region[PCI_REGION_SRAM];
943 	region->region_base = SRAM_BASE_ADDR;
944 	region->region_size = SRAM_SIZE;
945 	region->offset_in_bar = 0;
946 	region->bar_size = CFG_BAR_SIZE;
947 	region->bar_id = SRAM_CFG_BAR_ID;
948 	region->used = 1;
949 
950 	/* DRAM */
951 	region = &hdev->pci_mem_region[PCI_REGION_DRAM];
952 	region->region_base = DRAM_PHYS_BASE;
953 	region->region_size = hdev->asic_prop.dram_size;
954 	region->offset_in_bar = 0;
955 	region->bar_size = prop->dram_pci_bar_size;
956 	region->bar_id = DDR_BAR_ID;
957 	region->used = 1;
958 }
959 
960 /*
961  * goya_sw_init - Goya software initialization code
962  *
963  * @hdev: pointer to hl_device structure
964  *
965  */
966 static int goya_sw_init(struct hl_device *hdev)
967 {
968 	struct goya_device *goya;
969 	int rc;
970 
971 	/* Allocate device structure */
972 	goya = kzalloc(sizeof(*goya), GFP_KERNEL);
973 	if (!goya)
974 		return -ENOMEM;
975 
976 	/* according to goya_init_iatu */
977 	goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
978 
979 	goya->mme_clk = GOYA_PLL_FREQ_LOW;
980 	goya->tpc_clk = GOYA_PLL_FREQ_LOW;
981 	goya->ic_clk = GOYA_PLL_FREQ_LOW;
982 
983 	hdev->asic_specific = goya;
984 
985 	/* Create DMA pool for small allocations */
986 	hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
987 			&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
988 	if (!hdev->dma_pool) {
989 		dev_err(hdev->dev, "failed to create DMA pool\n");
990 		rc = -ENOMEM;
991 		goto free_goya_device;
992 	}
993 
994 	hdev->cpu_accessible_dma_mem = hl_asic_dma_alloc_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
995 							&hdev->cpu_accessible_dma_address,
996 							GFP_KERNEL | __GFP_ZERO);
997 
998 	if (!hdev->cpu_accessible_dma_mem) {
999 		rc = -ENOMEM;
1000 		goto free_dma_pool;
1001 	}
1002 
1003 	dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n",
1004 		&hdev->cpu_accessible_dma_address);
1005 
1006 	hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
1007 	if (!hdev->cpu_accessible_dma_pool) {
1008 		dev_err(hdev->dev,
1009 			"Failed to create CPU accessible DMA pool\n");
1010 		rc = -ENOMEM;
1011 		goto free_cpu_dma_mem;
1012 	}
1013 
1014 	rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
1015 				(uintptr_t) hdev->cpu_accessible_dma_mem,
1016 				HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
1017 	if (rc) {
1018 		dev_err(hdev->dev,
1019 			"Failed to add memory to CPU accessible DMA pool\n");
1020 		rc = -EFAULT;
1021 		goto free_cpu_accessible_dma_pool;
1022 	}
1023 
1024 	spin_lock_init(&goya->hw_queues_lock);
1025 	hdev->supports_coresight = true;
1026 	hdev->asic_prop.supports_compute_reset = true;
1027 	hdev->asic_prop.allow_inference_soft_reset = true;
1028 	hdev->supports_wait_for_multi_cs = false;
1029 	hdev->supports_ctx_switch = true;
1030 
1031 	hdev->asic_funcs->set_pci_memory_regions(hdev);
1032 
1033 	goya->goya_work = kmalloc(sizeof(struct goya_work_freq), GFP_KERNEL);
1034 	if (!goya->goya_work) {
1035 		rc = -ENOMEM;
1036 		goto free_cpu_accessible_dma_pool;
1037 	}
1038 
1039 	goya->goya_work->hdev = hdev;
1040 	INIT_DELAYED_WORK(&goya->goya_work->work_freq, goya_set_freq_to_low_job);
1041 
1042 	return 0;
1043 
1044 free_cpu_accessible_dma_pool:
1045 	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
1046 free_cpu_dma_mem:
1047 	hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
1048 					hdev->cpu_accessible_dma_address);
1049 free_dma_pool:
1050 	dma_pool_destroy(hdev->dma_pool);
1051 free_goya_device:
1052 	kfree(goya);
1053 
1054 	return rc;
1055 }
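/*
 * Usage sketch (editorial): with the CPU-accessible pool created above
 * (32-byte minimum granularity, since ilog2(32) == 5), carving out a
 * buffer and deriving its bus address looks roughly like this. The helper
 * name is hypothetical; the driver's common code provides the real
 * equivalent.
 */
static void *goya_sketch_cpu_mem_alloc(struct hl_device *hdev, size_t size,
					dma_addr_t *dma_handle)
{
	unsigned long vaddr;

	vaddr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
	if (!vaddr)
		return NULL;

	/* pool addresses are kernel VAs inside the coherent allocation,
	 * so the bus address is the same offset from the DMA handle
	 */
	*dma_handle = hdev->cpu_accessible_dma_address +
		(vaddr - (uintptr_t)hdev->cpu_accessible_dma_mem);

	return (void *)vaddr;
}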
1056 
1057 /*
1058  * goya_sw_fini - Goya software tear-down code
1059  *
1060  * @hdev: pointer to hl_device structure
1061  *
1062  */
1063 static int goya_sw_fini(struct hl_device *hdev)
1064 {
1065 	struct goya_device *goya = hdev->asic_specific;
1066 
1067 	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
1068 
1069 	hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
1070 					hdev->cpu_accessible_dma_address);
1071 
1072 	dma_pool_destroy(hdev->dma_pool);
1073 
1074 	kfree(goya->goya_work);
1075 	kfree(goya);
1076 
1077 	return 0;
1078 }
1079 
1080 static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
1081 		dma_addr_t bus_address)
1082 {
1083 	struct goya_device *goya = hdev->asic_specific;
1084 	u32 mtr_base_lo, mtr_base_hi;
1085 	u32 so_base_lo, so_base_hi;
1086 	u32 gic_base_lo, gic_base_hi;
1087 	u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
1088 	u32 dma_err_cfg = QMAN_DMA_ERR_MSG_EN;
1089 
1090 	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1091 	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1092 	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1093 	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1094 
1095 	gic_base_lo =
1096 		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1097 	gic_base_hi =
1098 		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1099 
1100 	WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
1101 	WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));
1102 
1103 	WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
1104 	WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
1105 	WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);
1106 
1107 	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1108 	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1109 	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1110 	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1111 	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1112 	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1113 	WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
1114 			GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);
1115 
1116 	/* PQ has a buffer of 2 cache lines, while CQ has 8 lines */
1117 	WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
1118 	WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);
1119 
1120 	if (goya->hw_cap_initialized & HW_CAP_MMU)
1121 		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
1122 	else
1123 		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);
1124 
1125 	if (hdev->stop_on_err)
1126 		dma_err_cfg |= 1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT;
1127 
1128 	WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, dma_err_cfg);
1129 	WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
1130 }
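/*
 * Sketch (editorial): mmDMA_QM_0_PQ_SIZE is programmed with
 * ilog2(HL_QUEUE_LENGTH), i.e. the hardware treats the PQ as a
 * power-of-two ring, so producer-index arithmetic reduces to masking
 * (hypothetical helper, assuming the driver-side index handling follows
 * the same convention):
 */
static u32 goya_sketch_pq_next_pi(u32 pi)
{
	return (pi + 1) & (HL_QUEUE_LENGTH - 1);	/* wrap on 2^n length */
}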
1131 
1132 static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
1133 {
1134 	u32 gic_base_lo, gic_base_hi;
1135 	u64 sob_addr;
1136 	u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);
1137 
1138 	gic_base_lo =
1139 		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1140 	gic_base_hi =
1141 		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1142 
1143 	WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
1144 	WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
1145 	WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
1146 			GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);
1147 
1148 	if (dma_id)
1149 		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
1150 				(dma_id - 1) * 4;
1151 	else
1152 		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
1153 
1154 	WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
1155 	WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
1156 }
1157 
1158 /*
1159  * goya_init_dma_qmans - Initialize QMAN DMA registers
1160  *
1161  * @hdev: pointer to hl_device structure
1162  *
1163  * Initialize the H/W registers of the QMAN DMA channels
1164  *
1165  */
1166 void goya_init_dma_qmans(struct hl_device *hdev)
1167 {
1168 	struct goya_device *goya = hdev->asic_specific;
1169 	struct hl_hw_queue *q;
1170 	int i;
1171 
1172 	if (goya->hw_cap_initialized & HW_CAP_DMA)
1173 		return;
1174 
1175 	q = &hdev->kernel_queues[0];
1176 
1177 	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
1178 		q->cq_id = q->msi_vec = i;
1179 		goya_init_dma_qman(hdev, i, q->bus_address);
1180 		goya_init_dma_ch(hdev, i);
1181 	}
1182 
1183 	goya->hw_cap_initialized |= HW_CAP_DMA;
1184 }
1185 
1186 /*
1187  * goya_disable_external_queues - Disable external queues
1188  *
1189  * @hdev: pointer to hl_device structure
1190  *
1191  */
1192 static void goya_disable_external_queues(struct hl_device *hdev)
1193 {
1194 	struct goya_device *goya = hdev->asic_specific;
1195 
1196 	if (!(goya->hw_cap_initialized & HW_CAP_DMA))
1197 		return;
1198 
1199 	WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
1200 	WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
1201 	WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
1202 	WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
1203 	WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
1204 }
1205 
1206 static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
1207 				u32 cp_sts_reg, u32 glbl_sts0_reg)
1208 {
1209 	int rc;
1210 	u32 status;
1211 
1212 	/* use the values of TPC0 as they are all the same */
1213 
1214 	WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
1215 
1216 	status = RREG32(cp_sts_reg);
1217 	if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
1218 		rc = hl_poll_timeout(
1219 			hdev,
1220 			cp_sts_reg,
1221 			status,
1222 			!(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
1223 			1000,
1224 			QMAN_FENCE_TIMEOUT_USEC);
1225 
1226 		/* if QMAN is stuck in fence no need to check for stop */
1227 		if (rc)
1228 			return 0;
1229 	}
1230 
1231 	rc = hl_poll_timeout(
1232 		hdev,
1233 		glbl_sts0_reg,
1234 		status,
1235 		(status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
1236 		1000,
1237 		QMAN_STOP_TIMEOUT_USEC);
1238 
1239 	if (rc) {
1240 		dev_err(hdev->dev,
1241 			"Timeout while waiting for QMAN to stop\n");
1242 		return -EINVAL;
1243 	}
1244 
1245 	return 0;
1246 }
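/*
 * Sketch (editorial): hl_poll_timeout() used above repeatedly reads a
 * register until a condition holds or a deadline passes. A simplified
 * expansion of that pattern (the real macro lives in the driver's common
 * headers and also handles simulator timeouts):
 */
#define goya_sketch_poll_timeout(hdev, reg, val, cond, sleep_us, to_us) \
({ \
	ktime_t __deadline = ktime_add_us(ktime_get(), to_us); \
	int __rc = 0; \
	for (;;) { \
		(val) = RREG32(reg); \
		if (cond) \
			break; \
		if (ktime_compare(ktime_get(), __deadline) > 0) { \
			__rc = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(((sleep_us) >> 2) + 1, sleep_us); \
	} \
	__rc; \
})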
1247 
1248 /*
1249  * goya_stop_external_queues - Stop external queues
1250  *
1251  * @hdev: pointer to hl_device structure
1252  *
1253  * Returns 0 on success
1254  *
1255  */
1256 static int goya_stop_external_queues(struct hl_device *hdev)
1257 {
1258 	int rc, retval = 0;
1259 
1260 	struct goya_device *goya = hdev->asic_specific;
1261 
1262 	if (!(goya->hw_cap_initialized & HW_CAP_DMA))
1263 		return retval;
1264 
1265 	rc = goya_stop_queue(hdev,
1266 			mmDMA_QM_0_GLBL_CFG1,
1267 			mmDMA_QM_0_CP_STS,
1268 			mmDMA_QM_0_GLBL_STS0);
1269 
1270 	if (rc) {
1271 		dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
1272 		retval = -EIO;
1273 	}
1274 
1275 	rc = goya_stop_queue(hdev,
1276 			mmDMA_QM_1_GLBL_CFG1,
1277 			mmDMA_QM_1_CP_STS,
1278 			mmDMA_QM_1_GLBL_STS0);
1279 
1280 	if (rc) {
1281 		dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
1282 		retval = -EIO;
1283 	}
1284 
1285 	rc = goya_stop_queue(hdev,
1286 			mmDMA_QM_2_GLBL_CFG1,
1287 			mmDMA_QM_2_CP_STS,
1288 			mmDMA_QM_2_GLBL_STS0);
1289 
1290 	if (rc) {
1291 		dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
1292 		retval = -EIO;
1293 	}
1294 
1295 	rc = goya_stop_queue(hdev,
1296 			mmDMA_QM_3_GLBL_CFG1,
1297 			mmDMA_QM_3_CP_STS,
1298 			mmDMA_QM_3_GLBL_STS0);
1299 
1300 	if (rc) {
1301 		dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
1302 		retval = -EIO;
1303 	}
1304 
1305 	rc = goya_stop_queue(hdev,
1306 			mmDMA_QM_4_GLBL_CFG1,
1307 			mmDMA_QM_4_CP_STS,
1308 			mmDMA_QM_4_GLBL_STS0);
1309 
1310 	if (rc) {
1311 		dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
1312 		retval = -EIO;
1313 	}
1314 
1315 	return retval;
1316 }
1317 
1318 /*
1319  * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
1320  *
1321  * @hdev: pointer to hl_device structure
1322  *
1323  * Returns 0 on success
1324  *
1325  */
1326 int goya_init_cpu_queues(struct hl_device *hdev)
1327 {
1328 	struct goya_device *goya = hdev->asic_specific;
1329 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1330 	struct hl_eq *eq;
1331 	u32 status;
1332 	struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
1333 	int err;
1334 
1335 	if (!hdev->cpu_queues_enable)
1336 		return 0;
1337 
1338 	if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
1339 		return 0;
1340 
1341 	eq = &hdev->event_queue;
1342 
1343 	WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
1344 	WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
1345 
1346 	WREG32(mmCPU_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
1347 	WREG32(mmCPU_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
1348 
1349 	WREG32(mmCPU_CQ_BASE_ADDR_LOW,
1350 			lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
1351 	WREG32(mmCPU_CQ_BASE_ADDR_HIGH,
1352 			upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
1353 
1354 	WREG32(mmCPU_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
1355 	WREG32(mmCPU_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
1356 	WREG32(mmCPU_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
1357 
1358 	/* Used for EQ CI */
1359 	WREG32(mmCPU_EQ_CI, 0);
1360 
1361 	WREG32(mmCPU_IF_PF_PQ_PI, 0);
1362 
1363 	WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP);
1364 
1365 	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
1366 			GOYA_ASYNC_EVENT_ID_PI_UPDATE);
1367 
1368 	err = hl_poll_timeout(
1369 		hdev,
1370 		mmCPU_PQ_INIT_STATUS,
1371 		status,
1372 		(status == PQ_INIT_STATUS_READY_FOR_HOST),
1373 		1000,
1374 		GOYA_CPU_TIMEOUT_USEC);
1375 
1376 	if (err) {
1377 		dev_err(hdev->dev,
1378 			"Failed to setup communication with device CPU\n");
1379 		return -EIO;
1380 	}
1381 
1382 	/* update FW application security bits */
1383 	if (prop->fw_cpu_boot_dev_sts0_valid)
1384 		prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0);
1385 
1386 	if (prop->fw_cpu_boot_dev_sts1_valid)
1387 		prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1);
1388 
1389 	goya->hw_cap_initialized |= HW_CAP_CPU_Q;
1390 	return 0;
1391 }
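/*
 * Sketch (editorial): after this handshake, submitting work to the CPU PQ
 * follows the same doorbell convention: publish the new producer index,
 * then raise the PI_UPDATE event so the embedded CPU re-reads it. A
 * simplified form of the driver's ring-doorbell path (helper name is
 * hypothetical):
 */
static void goya_sketch_ring_cpu_doorbell(struct hl_device *hdev, u32 pi)
{
	WREG32(mmCPU_IF_PF_PQ_PI, pi);		/* publish producer index */

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
		GOYA_ASYNC_EVENT_ID_PI_UPDATE);	/* kick the device CPU */
}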
1392 
1393 static void goya_set_pll_refclk(struct hl_device *hdev)
1394 {
1395 	WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
1396 	WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
1397 	WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
1398 	WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);
1399 
1400 	WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
1401 	WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
1402 	WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
1403 	WREG32(mmIC_PLL_DIV_SEL_3, 0x0);
1404 
1405 	WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
1406 	WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
1407 	WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
1408 	WREG32(mmMC_PLL_DIV_SEL_3, 0x0);
1409 
1410 	WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
1411 	WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
1412 	WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
1413 	WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);
1414 
1415 	WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
1416 	WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
1417 	WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
1418 	WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);
1419 
1420 	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
1421 	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
1422 	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
1423 	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);
1424 
1425 	WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
1426 	WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
1427 	WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
1428 	WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
1429 }
1430 
1431 static void goya_disable_clk_rlx(struct hl_device *hdev)
1432 {
1433 	WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
1434 	WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
1435 }
1436 
1437 static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
1438 {
1439 	u64 tpc_eml_address;
1440 	u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
1441 	int err, slm_index;
1442 
1443 	tpc_offset = tpc_id * 0x40000;
1444 	tpc_eml_offset = tpc_id * 0x200000;
1445 	tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
1446 	tpc_slm_offset = tpc_eml_address + 0x100000;
1447 
1448 	/*
1449 	 * Workaround for Bug H2 #2443:
1450 	 * "TPC SB is not initialized on chip reset"
1451 	 */
1452 
1453 	val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
1454 	if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
1455 		dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
1456 			tpc_id);
1457 
1458 	WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);
1459 
1460 	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
1461 	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
1462 	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
1463 	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
1464 	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
1465 	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
1466 	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
1467 	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
1468 	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
1469 	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);
1470 
1471 	WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1472 		1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);
1473 
1474 	err = hl_poll_timeout(
1475 		hdev,
1476 		mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1477 		val,
1478 		(val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
1479 		1000,
1480 		HL_DEVICE_TIMEOUT_USEC);
1481 
1482 	if (err)
1483 		dev_err(hdev->dev,
1484 			"Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);
1485 
1486 	WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1487 		1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);
1488 
1489 	msleep(GOYA_RESET_WAIT_MSEC);
1490 
1491 	WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1492 		~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));
1493 
1494 	msleep(GOYA_RESET_WAIT_MSEC);
1495 
1496 	for (slm_index = 0 ; slm_index < 256 ; slm_index++)
1497 		WREG32(tpc_slm_offset + (slm_index << 2), 0);
1498 
1499 	val = RREG32(tpc_slm_offset);
1500 }
1501 
1502 static void goya_tpc_mbist_workaround(struct hl_device *hdev)
1503 {
1504 	struct goya_device *goya = hdev->asic_specific;
1505 	int i;
1506 
1507 	if (hdev->pldm)
1508 		return;
1509 
1510 	if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
1511 		return;
1512 
1513 	/* Workaround for H2 #2443 */
1514 
1515 	for (i = 0 ; i < TPC_MAX_NUM ; i++)
1516 		_goya_tpc_mbist_workaround(hdev, i);
1517 
1518 	goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
1519 }
1520 
1521 /*
1522  * goya_init_golden_registers - Initialize golden registers
1523  *
1524  * @hdev: pointer to hl_device structure
1525  *
1526  * Initialize the H/W registers of the device
1527  *
1528  */
1529 static void goya_init_golden_registers(struct hl_device *hdev)
1530 {
1531 	struct goya_device *goya = hdev->asic_specific;
1532 	u32 polynom[10], tpc_intr_mask, offset;
1533 	int i;
1534 
1535 	if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
1536 		return;
1537 
1538 	polynom[0] = 0x00020080;
1539 	polynom[1] = 0x00401000;
1540 	polynom[2] = 0x00200800;
1541 	polynom[3] = 0x00002000;
1542 	polynom[4] = 0x00080200;
1543 	polynom[5] = 0x00040100;
1544 	polynom[6] = 0x00100400;
1545 	polynom[7] = 0x00004000;
1546 	polynom[8] = 0x00010000;
1547 	polynom[9] = 0x00008000;
1548 
1549 	/* Mask all arithmetic interrupts from TPC */
1550 	tpc_intr_mask = 0x7FFF;
1551 
1552 	for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
1553 		WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1554 		WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1555 		WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1556 		WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1557 		WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1558 
1559 		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
1560 		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
1561 		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
1562 		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
1563 		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
1564 
1565 
1566 		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
1567 		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
1568 		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
1569 		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
1570 		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
1571 
1572 		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
1573 		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
1574 		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
1575 		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
1576 		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
1577 
1578 		WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
1579 		WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
1580 		WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
1581 		WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
1582 		WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
1583 
1584 		WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
1585 		WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
1586 		WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
1587 		WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
1588 		WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
1589 	}
1590 
1591 	WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
1592 	WREG32(mmMME_AGU, 0x0f0f0f10);
1593 	WREG32(mmMME_SEI_MASK, ~0x0);
1594 
1595 	WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1596 	WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1597 	WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1598 	WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1599 	WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1600 	WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
1601 	WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
1602 	WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
1603 	WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
1604 	WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1605 	WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1606 	WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
1607 	WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1608 	WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1609 	WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
1610 	WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
1611 	WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
1612 	WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
1613 	WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
1614 	WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
1615 	WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
1616 	WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
1617 	WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
1618 	WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1619 	WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1620 	WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1621 	WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
1622 	WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
1623 	WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1624 	WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
1625 	WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1626 	WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1627 	WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
1628 	WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
1629 	WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1630 	WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1631 	WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1632 	WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1633 	WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
1634 	WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
1635 	WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
1636 	WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
1637 	WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
1638 	WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
1639 	WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
1640 	WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
1641 	WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1642 	WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1643 	WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1644 	WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1645 	WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
1646 	WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
1647 	WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
1648 	WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
1649 	WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1650 	WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1651 	WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1652 	WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1653 	WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1654 	WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1655 	WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1656 	WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1657 	WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1658 	WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1659 	WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1660 	WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
1661 	WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
1662 	WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1663 	WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1664 	WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1665 	WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1666 	WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1667 	WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1668 	WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1669 	WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
1670 	WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
1671 	WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
1672 	WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
1673 	WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1674 	WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1675 	WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1676 	WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1677 	WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1678 	WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1679 
1680 	WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1681 	WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1682 	WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
1683 	WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
1684 	WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1685 	WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
1686 	WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1687 	WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1688 	WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1689 	WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1690 	WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1691 	WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
1692 
1693 	WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1694 	WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
1695 	WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
1696 	WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
1697 	WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
1698 	WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
1699 	WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1700 	WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1701 	WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1702 	WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1703 	WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1704 	WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
1705 
1706 	WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1707 	WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1708 	WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
1709 	WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
1710 	WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
1711 	WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
1712 	WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
1713 	WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
1714 	WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
1715 	WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1716 	WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1717 	WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
1718 
1719 	WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1720 	WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1721 	WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
1722 	WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1723 	WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
1724 	WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
1725 	WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
1726 	WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
1727 	WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
1728 	WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1729 	WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1730 	WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
1731 
1732 	WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
1733 	WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
1734 	WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
1735 	WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1736 	WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
1737 	WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
1738 	WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
1739 	WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
1740 	WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1741 	WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1742 	WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1743 	WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1744 
1745 	WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1746 	WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1747 	WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
1748 	WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
1749 	WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1750 	WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
1751 	WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
1752 	WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
1753 	WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1754 	WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1755 	WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1756 	WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1757 
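	/*
	 * Editor's note: the loop below programs the routers' split
	 * coefficients; only bits [31:7] of each polynomial coefficient are
	 * written (hence the ">> 7").
	 */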
1758 	for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
1759 		WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1760 		WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1761 		WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1762 		WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1763 		WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1764 		WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1765 
1766 		WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1767 		WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1768 		WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1769 		WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1770 		WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1771 		WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1772 		WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1773 		WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1774 
1775 		WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1776 		WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1777 	}
1778 
1779 	for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
1780 		WREG32(mmMME1_RTR_SCRAMB_EN + offset,
1781 				1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
1782 		WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
1783 				1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
1784 	}
1785 
1786 	for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
1787 		/*
1788 		 * Workaround for Bug H2 #2441 :
1789 		 * "ST.NOP set trace event illegal opcode"
1790 		 */
1791 		WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
1792 
1793 		WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
1794 				1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
1795 		WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1796 				1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1797 
1798 		WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset,
1799 				ICACHE_FETCH_LINE_NUM, 2);
1800 	}
1801 
1802 	WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
1803 	WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
1804 			1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1805 
1806 	WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
1807 	WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
1808 			1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1809 
1810 	/*
1811 	 * Workaround for H2 #HW-23 bug
1812 	 * Set DMA max outstanding read requests to 240 on DMA CH 1.
1813 	 * This limitation is still large enough to not affect Gen4 bandwidth.
1814 	 * We need to only limit that DMA channel because the user can only read
1815 	 * from Host using DMA CH 1
1816 	 */
1817 	WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
1818 
1819 	WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
1820 
1821 	goya->hw_cap_initialized |= HW_CAP_GOLDEN;
1822 }
1823 
1824 static void goya_init_mme_qman(struct hl_device *hdev)
1825 {
1826 	u32 mtr_base_lo, mtr_base_hi;
1827 	u32 so_base_lo, so_base_hi;
1828 	u32 gic_base_lo, gic_base_hi;
1829 	u64 qman_base_addr;
1830 
1831 	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1832 	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1833 	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1834 	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1835 
1836 	gic_base_lo =
1837 		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1838 	gic_base_hi =
1839 		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1840 
1841 	qman_base_addr = hdev->asic_prop.sram_base_address +
1842 				MME_QMAN_BASE_OFFSET;
1843 
1844 	WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
1845 	WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
1846 	WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
1847 	WREG32(mmMME_QM_PQ_PI, 0);
1848 	WREG32(mmMME_QM_PQ_CI, 0);
1849 	WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
1850 	WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
1851 	WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
1852 	WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
1853 
1854 	WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1855 	WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1856 	WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1857 	WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1858 
1859 	/* QMAN CQ has 8 cache lines */
1860 	WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
1861 
1862 	WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
1863 	WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
1864 
1865 	WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
1866 
1867 	WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
1868 
1869 	WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
1870 
1871 	WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
1872 }
1873 
1874 static void goya_init_mme_cmdq(struct hl_device *hdev)
1875 {
1876 	u32 mtr_base_lo, mtr_base_hi;
1877 	u32 so_base_lo, so_base_hi;
1878 	u32 gic_base_lo, gic_base_hi;
1879 
1880 	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1881 	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1882 	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1883 	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1884 
1885 	gic_base_lo =
1886 		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1887 	gic_base_hi =
1888 		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1889 
1890 	WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1891 	WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1892 	WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO,	so_base_lo);
1893 	WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1894 
1895 	/* CMDQ CQ has 20 cache lines */
1896 	WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
1897 
1898 	WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
1899 	WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
1900 
1901 	WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
1902 
1903 	WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
1904 
1905 	WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
1906 
1907 	WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
1908 }
1909 
1910 void goya_init_mme_qmans(struct hl_device *hdev)
1911 {
1912 	struct goya_device *goya = hdev->asic_specific;
1913 	u32 so_base_lo, so_base_hi;
1914 
1915 	if (goya->hw_cap_initialized & HW_CAP_MME)
1916 		return;
1917 
1918 	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1919 	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1920 
1921 	WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
1922 	WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
1923 
1924 	goya_init_mme_qman(hdev);
1925 	goya_init_mme_cmdq(hdev);
1926 
1927 	goya->hw_cap_initialized |= HW_CAP_MME;
1928 }
1929 
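/*
 * Editor's note: the per-TPC QMAN register blocks are laid out at a fixed
 * stride, so the registers of TPC <tpc_id> are reached by adding
 * tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI) to the TPC0 addresses.
 */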
1930 static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
1931 {
1932 	u32 mtr_base_lo, mtr_base_hi;
1933 	u32 so_base_lo, so_base_hi;
1934 	u32 gic_base_lo, gic_base_hi;
1935 	u64 qman_base_addr;
1936 	u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
1937 
1938 	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1939 	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1940 	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1941 	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1942 
1943 	gic_base_lo =
1944 		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1945 	gic_base_hi =
1946 		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1947 
1948 	qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
1949 
1950 	WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
1951 	WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
1952 	WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
1953 	WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
1954 	WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
1955 	WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
1956 	WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
1957 	WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
1958 	WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
1959 
1960 	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1961 	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1962 	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1963 	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1964 
1965 	WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
1966 
1967 	WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1968 	WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1969 
1970 	WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
1971 			GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
1972 
1973 	WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
1974 
1975 	WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
1976 
1977 	WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
1978 }
1979 
1980 static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1981 {
1982 	u32 mtr_base_lo, mtr_base_hi;
1983 	u32 so_base_lo, so_base_hi;
1984 	u32 gic_base_lo, gic_base_hi;
1985 	u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
1986 
1987 	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1988 	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1989 	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1990 	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1991 
1992 	gic_base_lo =
1993 		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1994 	gic_base_hi =
1995 		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1996 
1997 	WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1998 	WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1999 	WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
2000 	WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
2001 
2002 	WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
2003 
2004 	WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
2005 	WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
2006 
2007 	WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
2008 			GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
2009 
2010 	WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
2011 
2012 	WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
2013 
2014 	WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
2015 }
2016 
2017 void goya_init_tpc_qmans(struct hl_device *hdev)
2018 {
2019 	struct goya_device *goya = hdev->asic_specific;
2020 	u32 so_base_lo, so_base_hi;
2021 	u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
2022 			mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
2023 	int i;
2024 
2025 	if (goya->hw_cap_initialized & HW_CAP_TPC)
2026 		return;
2027 
2028 	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
2029 	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
2030 
2031 	for (i = 0 ; i < TPC_MAX_NUM ; i++) {
2032 		WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
2033 				so_base_lo);
2034 		WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
2035 				so_base_hi);
2036 	}
2037 
2038 	goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
2039 	goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
2040 	goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
2041 	goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
2042 	goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
2043 	goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
2044 	goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
2045 	goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
2046 
2047 	for (i = 0 ; i < TPC_MAX_NUM ; i++)
2048 		goya_init_tpc_cmdq(hdev, i);
2049 
2050 	goya->hw_cap_initialized |= HW_CAP_TPC;
2051 }
2052 
2053 /*
2054  * goya_disable_internal_queues - Disable internal queues
2055  *
2056  * @hdev: pointer to hl_device structure
2057  *
2058  */
2059 static void goya_disable_internal_queues(struct hl_device *hdev)
2060 {
2061 	struct goya_device *goya = hdev->asic_specific;
2062 
2063 	if (!(goya->hw_cap_initialized & HW_CAP_MME))
2064 		goto disable_tpc;
2065 
2066 	WREG32(mmMME_QM_GLBL_CFG0, 0);
2067 	WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
2068 
2069 disable_tpc:
2070 	if (!(goya->hw_cap_initialized & HW_CAP_TPC))
2071 		return;
2072 
2073 	WREG32(mmTPC0_QM_GLBL_CFG0, 0);
2074 	WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
2075 
2076 	WREG32(mmTPC1_QM_GLBL_CFG0, 0);
2077 	WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
2078 
2079 	WREG32(mmTPC2_QM_GLBL_CFG0, 0);
2080 	WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
2081 
2082 	WREG32(mmTPC3_QM_GLBL_CFG0, 0);
2083 	WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
2084 
2085 	WREG32(mmTPC4_QM_GLBL_CFG0, 0);
2086 	WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
2087 
2088 	WREG32(mmTPC5_QM_GLBL_CFG0, 0);
2089 	WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
2090 
2091 	WREG32(mmTPC6_QM_GLBL_CFG0, 0);
2092 	WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
2093 
2094 	WREG32(mmTPC7_QM_GLBL_CFG0, 0);
2095 	WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
2096 }
2097 
2098 /*
2099  * goya_stop_internal_queues - Stop internal queues
2100  *
2101  * @hdev: pointer to hl_device structure
2102  *
2103  * Returns 0 on success
2104  *
2105  */
2106 static int goya_stop_internal_queues(struct hl_device *hdev)
2107 {
2108 	struct goya_device *goya = hdev->asic_specific;
2109 	int rc, retval = 0;
2110 
2111 	if (!(goya->hw_cap_initialized & HW_CAP_MME))
2112 		goto stop_tpc;
2113 
2114 	/*
2115 	 * Each queue (QMAN) is a separate H/W logic, so each QMAN can be
2116 	 * stopped independently; failure to stop one does not prevent us
2117 	 * from trying to stop the other QMANs
2118 	 */
2119 
2120 	rc = goya_stop_queue(hdev,
2121 			mmMME_QM_GLBL_CFG1,
2122 			mmMME_QM_CP_STS,
2123 			mmMME_QM_GLBL_STS0);
2124 
2125 	if (rc) {
2126 		dev_err(hdev->dev, "failed to stop MME QMAN\n");
2127 		retval = -EIO;
2128 	}
2129 
2130 	rc = goya_stop_queue(hdev,
2131 			mmMME_CMDQ_GLBL_CFG1,
2132 			mmMME_CMDQ_CP_STS,
2133 			mmMME_CMDQ_GLBL_STS0);
2134 
2135 	if (rc) {
2136 		dev_err(hdev->dev, "failed to stop MME CMDQ\n");
2137 		retval = -EIO;
2138 	}
2139 
2140 stop_tpc:
2141 	if (!(goya->hw_cap_initialized & HW_CAP_TPC))
2142 		return retval;
2143 
2144 	rc = goya_stop_queue(hdev,
2145 			mmTPC0_QM_GLBL_CFG1,
2146 			mmTPC0_QM_CP_STS,
2147 			mmTPC0_QM_GLBL_STS0);
2148 
2149 	if (rc) {
2150 		dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
2151 		retval = -EIO;
2152 	}
2153 
2154 	rc = goya_stop_queue(hdev,
2155 			mmTPC0_CMDQ_GLBL_CFG1,
2156 			mmTPC0_CMDQ_CP_STS,
2157 			mmTPC0_CMDQ_GLBL_STS0);
2158 
2159 	if (rc) {
2160 		dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
2161 		retval = -EIO;
2162 	}
2163 
2164 	rc = goya_stop_queue(hdev,
2165 			mmTPC1_QM_GLBL_CFG1,
2166 			mmTPC1_QM_CP_STS,
2167 			mmTPC1_QM_GLBL_STS0);
2168 
2169 	if (rc) {
2170 		dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
2171 		retval = -EIO;
2172 	}
2173 
2174 	rc = goya_stop_queue(hdev,
2175 			mmTPC1_CMDQ_GLBL_CFG1,
2176 			mmTPC1_CMDQ_CP_STS,
2177 			mmTPC1_CMDQ_GLBL_STS0);
2178 
2179 	if (rc) {
2180 		dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
2181 		retval = -EIO;
2182 	}
2183 
2184 	rc = goya_stop_queue(hdev,
2185 			mmTPC2_QM_GLBL_CFG1,
2186 			mmTPC2_QM_CP_STS,
2187 			mmTPC2_QM_GLBL_STS0);
2188 
2189 	if (rc) {
2190 		dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
2191 		retval = -EIO;
2192 	}
2193 
2194 	rc = goya_stop_queue(hdev,
2195 			mmTPC2_CMDQ_GLBL_CFG1,
2196 			mmTPC2_CMDQ_CP_STS,
2197 			mmTPC2_CMDQ_GLBL_STS0);
2198 
2199 	if (rc) {
2200 		dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
2201 		retval = -EIO;
2202 	}
2203 
2204 	rc = goya_stop_queue(hdev,
2205 			mmTPC3_QM_GLBL_CFG1,
2206 			mmTPC3_QM_CP_STS,
2207 			mmTPC3_QM_GLBL_STS0);
2208 
2209 	if (rc) {
2210 		dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
2211 		retval = -EIO;
2212 	}
2213 
2214 	rc = goya_stop_queue(hdev,
2215 			mmTPC3_CMDQ_GLBL_CFG1,
2216 			mmTPC3_CMDQ_CP_STS,
2217 			mmTPC3_CMDQ_GLBL_STS0);
2218 
2219 	if (rc) {
2220 		dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
2221 		retval = -EIO;
2222 	}
2223 
2224 	rc = goya_stop_queue(hdev,
2225 			mmTPC4_QM_GLBL_CFG1,
2226 			mmTPC4_QM_CP_STS,
2227 			mmTPC4_QM_GLBL_STS0);
2228 
2229 	if (rc) {
2230 		dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
2231 		retval = -EIO;
2232 	}
2233 
2234 	rc = goya_stop_queue(hdev,
2235 			mmTPC4_CMDQ_GLBL_CFG1,
2236 			mmTPC4_CMDQ_CP_STS,
2237 			mmTPC4_CMDQ_GLBL_STS0);
2238 
2239 	if (rc) {
2240 		dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
2241 		retval = -EIO;
2242 	}
2243 
2244 	rc = goya_stop_queue(hdev,
2245 			mmTPC5_QM_GLBL_CFG1,
2246 			mmTPC5_QM_CP_STS,
2247 			mmTPC5_QM_GLBL_STS0);
2248 
2249 	if (rc) {
2250 		dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
2251 		retval = -EIO;
2252 	}
2253 
2254 	rc = goya_stop_queue(hdev,
2255 			mmTPC5_CMDQ_GLBL_CFG1,
2256 			mmTPC5_CMDQ_CP_STS,
2257 			mmTPC5_CMDQ_GLBL_STS0);
2258 
2259 	if (rc) {
2260 		dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
2261 		retval = -EIO;
2262 	}
2263 
2264 	rc = goya_stop_queue(hdev,
2265 			mmTPC6_QM_GLBL_CFG1,
2266 			mmTPC6_QM_CP_STS,
2267 			mmTPC6_QM_GLBL_STS0);
2268 
2269 	if (rc) {
2270 		dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
2271 		retval = -EIO;
2272 	}
2273 
2274 	rc = goya_stop_queue(hdev,
2275 			mmTPC6_CMDQ_GLBL_CFG1,
2276 			mmTPC6_CMDQ_CP_STS,
2277 			mmTPC6_CMDQ_GLBL_STS0);
2278 
2279 	if (rc) {
2280 		dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
2281 		retval = -EIO;
2282 	}
2283 
2284 	rc = goya_stop_queue(hdev,
2285 			mmTPC7_QM_GLBL_CFG1,
2286 			mmTPC7_QM_CP_STS,
2287 			mmTPC7_QM_GLBL_STS0);
2288 
2289 	if (rc) {
2290 		dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
2291 		retval = -EIO;
2292 	}
2293 
2294 	rc = goya_stop_queue(hdev,
2295 			mmTPC7_CMDQ_GLBL_CFG1,
2296 			mmTPC7_CMDQ_CP_STS,
2297 			mmTPC7_CMDQ_GLBL_STS0);
2298 
2299 	if (rc) {
2300 		dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
2301 		retval = -EIO;
2302 	}
2303 
2304 	return retval;
2305 }
2306 
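/*
 * Editor's note: an illustrative, table-driven equivalent of the unrolled
 * goya_stop_queue() sequence above. This is a sketch only (the struct and
 * function names below are hypothetical, not part of the driver); it relies
 * on the same register macros and helpers the driver already uses.
 */
#if 0
struct goya_qman_stop_regs {
	u32 cfg1, cp_sts, glbl_sts0;
	const char *name;
};

static const struct goya_qman_stop_regs goya_tpc_stop_regs[] = {
	{ mmTPC0_QM_GLBL_CFG1, mmTPC0_QM_CP_STS, mmTPC0_QM_GLBL_STS0,
		"TPC 0 QMAN" },
	{ mmTPC0_CMDQ_GLBL_CFG1, mmTPC0_CMDQ_CP_STS, mmTPC0_CMDQ_GLBL_STS0,
		"TPC 0 CMDQ" },
	/* ... TPC1..TPC7 QMAN/CMDQ entries follow the same pattern ... */
};

static int goya_stop_tpc_queues_sketch(struct hl_device *hdev)
{
	int i, rc, retval = 0;

	for (i = 0 ; i < ARRAY_SIZE(goya_tpc_stop_regs) ; i++) {
		rc = goya_stop_queue(hdev, goya_tpc_stop_regs[i].cfg1,
				goya_tpc_stop_regs[i].cp_sts,
				goya_tpc_stop_regs[i].glbl_sts0);
		if (rc) {
			dev_err(hdev->dev, "failed to stop %s\n",
				goya_tpc_stop_regs[i].name);
			retval = -EIO;
		}
	}

	return retval;
}
#endif
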
2307 static void goya_dma_stall(struct hl_device *hdev)
2308 {
2309 	struct goya_device *goya = hdev->asic_specific;
2310 
2311 	if (!(goya->hw_cap_initialized & HW_CAP_DMA))
2312 		return;
2313 
2314 	WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
2315 	WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
2316 	WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
2317 	WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
2318 	WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
2319 }
2320 
2321 static void goya_tpc_stall(struct hl_device *hdev)
2322 {
2323 	struct goya_device *goya = hdev->asic_specific;
2324 
2325 	if (!(goya->hw_cap_initialized & HW_CAP_TPC))
2326 		return;
2327 
2328 	WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
2329 	WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
2330 	WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
2331 	WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
2332 	WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
2333 	WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
2334 	WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
2335 	WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
2336 }
2337 
2338 static void goya_mme_stall(struct hl_device *hdev)
2339 {
2340 	struct goya_device *goya = hdev->asic_specific;
2341 
2342 	if (!(goya->hw_cap_initialized & HW_CAP_MME))
2343 		return;
2344 
2345 	WREG32(mmMME_STALL, 0xFFFFFFFF);
2346 }
2347 
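/*
 * Editor's note: MSI-X layout below is one vector per completion queue plus
 * a dedicated vector for the event queue. On a request_irq() failure, only
 * the irq_cnt_init vectors that were successfully requested are freed.
 */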
2348 static int goya_enable_msix(struct hl_device *hdev)
2349 {
2350 	struct goya_device *goya = hdev->asic_specific;
2351 	int cq_cnt = hdev->asic_prop.completion_queues_count;
2352 	int rc, i, irq_cnt_init, irq;
2353 
2354 	if (goya->hw_cap_initialized & HW_CAP_MSIX)
2355 		return 0;
2356 
2357 	rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
2358 				GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
2359 	if (rc < 0) {
2360 		dev_err(hdev->dev,
2361 			"MSI-X: Failed to enable support -- %d/%d\n",
2362 			GOYA_MSIX_ENTRIES, rc);
2363 		return rc;
2364 	}
2365 
2366 	for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
2367 		irq = pci_irq_vector(hdev->pdev, i);
2368 		rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
2369 				&hdev->completion_queue[i]);
2370 		if (rc) {
2371 			dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2372 			goto free_irqs;
2373 		}
2374 	}
2375 
2376 	irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2377 
2378 	rc = request_irq(irq, hl_irq_handler_eq, 0,
2379 			goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
2380 			&hdev->event_queue);
2381 	if (rc) {
2382 		dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2383 		goto free_irqs;
2384 	}
2385 
2386 	goya->hw_cap_initialized |= HW_CAP_MSIX;
2387 	return 0;
2388 
2389 free_irqs:
2390 	for (i = 0 ; i < irq_cnt_init ; i++)
2391 		free_irq(pci_irq_vector(hdev->pdev, i),
2392 			&hdev->completion_queue[i]);
2393 
2394 	pci_free_irq_vectors(hdev->pdev);
2395 	return rc;
2396 }
2397 
2398 static void goya_sync_irqs(struct hl_device *hdev)
2399 {
2400 	struct goya_device *goya = hdev->asic_specific;
2401 	int i;
2402 
2403 	if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2404 		return;
2405 
2406 	/* Wait for all pending IRQs to be finished */
2407 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2408 		synchronize_irq(pci_irq_vector(hdev->pdev, i));
2409 
2410 	synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
2411 }
2412 
2413 static void goya_disable_msix(struct hl_device *hdev)
2414 {
2415 	struct goya_device *goya = hdev->asic_specific;
2416 	int i, irq;
2417 
2418 	if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2419 		return;
2420 
2421 	goya_sync_irqs(hdev);
2422 
2423 	irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2424 	free_irq(irq, &hdev->event_queue);
2425 
2426 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2427 		irq = pci_irq_vector(hdev->pdev, i);
2428 		free_irq(irq, &hdev->completion_queue[i]);
2429 	}
2430 
2431 	pci_free_irq_vectors(hdev->pdev);
2432 
2433 	goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2434 }
2435 
2436 static void goya_enable_timestamp(struct hl_device *hdev)
2437 {
2438 	/* Disable the timestamp counter */
2439 	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
2440 
2441 	/* Zero the lower/upper parts of the 64-bit counter */
2442 	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
2443 	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
2444 
2445 	/* Enable the counter */
2446 	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
2447 }
2448 
2449 static void goya_disable_timestamp(struct hl_device *hdev)
2450 {
2451 	/* Disable the timestamp counter */
2452 	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
2453 }
2454 
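/*
 * Editor's note on the halt ordering below: first stop the external and
 * internal queues so no new work is fetched, wait, then stall the
 * DMA/TPC/MME engines themselves, wait again, and only then disable the
 * queues and the timestamp counter.
 */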
2455 static void goya_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
2456 {
2457 	u32 wait_timeout_ms;
2458 
2459 	if (hdev->pldm)
2460 		wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2461 	else
2462 		wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
2463 
2464 	goya_stop_external_queues(hdev);
2465 	goya_stop_internal_queues(hdev);
2466 
2467 	msleep(wait_timeout_ms);
2468 
2469 	goya_dma_stall(hdev);
2470 	goya_tpc_stall(hdev);
2471 	goya_mme_stall(hdev);
2472 
2473 	msleep(wait_timeout_ms);
2474 
2475 	goya_disable_external_queues(hdev);
2476 	goya_disable_internal_queues(hdev);
2477 
2478 	goya_disable_timestamp(hdev);
2479 
2480 	if (hard_reset) {
2481 		goya_disable_msix(hdev);
2482 		goya_mmu_remove_device_cpu_mappings(hdev);
2483 	} else {
2484 		goya_sync_irqs(hdev);
2485 	}
2486 }
2487 
2488 /*
2489  * goya_load_firmware_to_device() - Load LINUX FW code to device.
2490  * @hdev: Pointer to hl_device structure.
2491  *
2492  * Copy LINUX fw code from firmware file to DDR BAR.
2493  *
2494  * Return: 0 on success, non-zero for failure.
2495  */
2496 static int goya_load_firmware_to_device(struct hl_device *hdev)
2497 {
2498 	void __iomem *dst;
2499 
2500 	dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2501 
2502 	return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst, 0, 0);
2503 }
2504 
2505 /*
2506  * goya_load_boot_fit_to_device() - Load boot fit to device.
2507  * @hdev: Pointer to hl_device structure.
2508  *
2509  * Copy boot fit file to SRAM BAR.
2510  *
2511  * Return: 0 on success, non-zero for failure.
2512  */
2513 static int goya_load_boot_fit_to_device(struct hl_device *hdev)
2514 {
2515 	void __iomem *dst;
2516 
2517 	dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
2518 
2519 	return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst, 0, 0);
2520 }
2521 
2522 static void goya_init_dynamic_firmware_loader(struct hl_device *hdev)
2523 {
2524 	struct dynamic_fw_load_mgr *dynamic_loader;
2525 	struct cpu_dyn_regs *dyn_regs;
2526 
2527 	dynamic_loader = &hdev->fw_loader.dynamic_loader;
2528 
2529 	/*
2530 	 * Here we set initial values for a few specific dynamic regs (before
2531 	 * reading the first descriptor from FW, those values have to be
2532 	 * hard-coded). In later stages of the protocol those values will be
2533 	 * updated automatically by reading the FW descriptor, so the data
2534 	 * there will always be up-to-date.
2535 	 */
2536 	dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs;
2537 	dyn_regs->kmd_msg_to_cpu =
2538 				cpu_to_le32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU);
2539 	dyn_regs->cpu_cmd_status_to_host =
2540 				cpu_to_le32(mmCPU_CMD_STATUS_TO_HOST);
2541 
2542 	dynamic_loader->wait_for_bl_timeout = GOYA_WAIT_FOR_BL_TIMEOUT_USEC;
2543 }
2544 
2545 static void goya_init_static_firmware_loader(struct hl_device *hdev)
2546 {
2547 	struct static_fw_load_mgr *static_loader;
2548 
2549 	static_loader = &hdev->fw_loader.static_loader;
2550 
2551 	static_loader->preboot_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
2552 	static_loader->boot_fit_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
2553 	static_loader->kmd_msg_to_cpu_reg = mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU;
2554 	static_loader->cpu_cmd_status_to_host_reg = mmCPU_CMD_STATUS_TO_HOST;
2555 	static_loader->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
2556 	static_loader->cpu_boot_dev_status0_reg = mmCPU_BOOT_DEV_STS0;
2557 	static_loader->cpu_boot_dev_status1_reg = mmCPU_BOOT_DEV_STS1;
2558 	static_loader->boot_err0_reg = mmCPU_BOOT_ERR0;
2559 	static_loader->boot_err1_reg = mmCPU_BOOT_ERR1;
2560 	static_loader->preboot_version_offset_reg = mmPREBOOT_VER_OFFSET;
2561 	static_loader->boot_fit_version_offset_reg = mmUBOOT_VER_OFFSET;
2562 	static_loader->sram_offset_mask = ~(lower_32_bits(SRAM_BASE_ADDR));
2563 }
2564 
2565 static void goya_init_firmware_preload_params(struct hl_device *hdev)
2566 {
2567 	struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
2568 
2569 	pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
2570 	pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0;
2571 	pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1;
2572 	pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0;
2573 	pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1;
2574 	pre_fw_load->wait_for_preboot_timeout = GOYA_BOOT_FIT_REQ_TIMEOUT_USEC;
2575 }
2576 
2577 static void goya_init_firmware_loader(struct hl_device *hdev)
2578 {
2579 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2580 	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
2581 
2582 	/* fill common fields */
2583 	fw_loader->fw_comp_loaded = FW_TYPE_NONE;
2584 	fw_loader->boot_fit_img.image_name = GOYA_BOOT_FIT_FILE;
2585 	fw_loader->linux_img.image_name = GOYA_LINUX_FW_FILE;
2586 	fw_loader->cpu_timeout = GOYA_CPU_TIMEOUT_USEC;
2587 	fw_loader->boot_fit_timeout = GOYA_BOOT_FIT_REQ_TIMEOUT_USEC;
2588 	fw_loader->skip_bmc = false;
2589 	fw_loader->sram_bar_id = SRAM_CFG_BAR_ID;
2590 	fw_loader->dram_bar_id = DDR_BAR_ID;
2591 
2592 	if (prop->dynamic_fw_load)
2593 		goya_init_dynamic_firmware_loader(hdev);
2594 	else
2595 		goya_init_static_firmware_loader(hdev);
2596 }
2597 
2598 static int goya_init_cpu(struct hl_device *hdev)
2599 {
2600 	struct goya_device *goya = hdev->asic_specific;
2601 	int rc;
2602 
2603 	if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
2604 		return 0;
2605 
2606 	if (goya->hw_cap_initialized & HW_CAP_CPU)
2607 		return 0;
2608 
2609 	/*
2610 	 * Before pushing u-boot/linux to the device, we need to set the DDR
2611 	 * bar to the base address of the DRAM
2612 	 */
2613 	if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
2614 		dev_err(hdev->dev,
2615 			"failed to map DDR bar to DRAM base address\n");
2616 		return -EIO;
2617 	}
2618 
2619 	rc = hl_fw_init_cpu(hdev);
2620 
2621 	if (rc)
2622 		return rc;
2623 
2624 	goya->hw_cap_initialized |= HW_CAP_CPU;
2625 
2626 	return 0;
2627 }
2628 
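/*
 * Editor's note on the hop0 configuration handshake below: the hop0 physical
 * address is split across the PA43_12/PA49_44 registers, then bit 31 of
 * MMU_ASID_BUSY is set together with the ASID and polled until the H/W
 * clears it.
 */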
2629 static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
2630 						u64 phys_addr)
2631 {
2632 	u32 status, timeout_usec;
2633 	int rc;
2634 
2635 	if (hdev->pldm)
2636 		timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
2637 	else
2638 		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
2639 
2640 	WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
2641 	WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
2642 	WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
2643 
2644 	rc = hl_poll_timeout(
2645 		hdev,
2646 		MMU_ASID_BUSY,
2647 		status,
2648 		!(status & 0x80000000),
2649 		1000,
2650 		timeout_usec);
2651 
2652 	if (rc) {
2653 		dev_err(hdev->dev,
2654 			"Timeout during MMU hop0 config of asid %d\n", asid);
2655 		return rc;
2656 	}
2657 
2658 	return 0;
2659 }
2660 
2661 int goya_mmu_init(struct hl_device *hdev)
2662 {
2663 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2664 	struct goya_device *goya = hdev->asic_specific;
2665 	u64 hop0_addr;
2666 	int rc, i;
2667 
2668 	if (goya->hw_cap_initialized & HW_CAP_MMU)
2669 		return 0;
2670 
2671 	hdev->dram_default_page_mapping = true;
2672 
2673 	for (i = 0 ; i < prop->max_asid ; i++) {
2674 		hop0_addr = prop->mmu_pgt_addr +
2675 				(i * prop->dmmu.hop_table_size);
2676 
2677 		rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2678 		if (rc) {
2679 			dev_err(hdev->dev,
2680 				"failed to set hop0 addr for asid %d\n", i);
2681 			goto err;
2682 		}
2683 	}
2684 
2685 	goya->hw_cap_initialized |= HW_CAP_MMU;
2686 
2687 	/* init MMU cache manage page */
2688 	WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2689 				lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2690 	WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
2691 
2692 	/* Remove follower feature due to performance bug */
2693 	WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2694 			(~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2695 
2696 	hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR | MMU_OP_PHYS_PACK);
2697 
2698 	WREG32(mmMMU_MMU_ENABLE, 1);
2699 	WREG32(mmMMU_SPI_MASK, 0xF);
2700 
2701 	return 0;
2702 
2703 err:
2704 	return rc;
2705 }
2706 
2707 /*
2708  * goya_hw_init - Goya hardware initialization code
2709  *
2710  * @hdev: pointer to hl_device structure
2711  *
2712  * Returns 0 on success
2713  *
2714  */
2715 static int goya_hw_init(struct hl_device *hdev)
2716 {
2717 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2718 	int rc;
2719 
2720 	/* Perform read from the device to make sure device is up */
2721 	RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2722 
2723 	/*
2724 	 * Let's mark in the H/W that we have reached this point. We check
2725 	 * this value in the reset_before_init function to understand whether
2726 	 * we need to reset the chip before doing H/W init. This register is
2727 	 * cleared by the H/W upon H/W reset
2728 	 */
2729 	WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
2730 
2731 	rc = goya_init_cpu(hdev);
2732 	if (rc) {
2733 		dev_err(hdev->dev, "failed to initialize CPU\n");
2734 		return rc;
2735 	}
2736 
2737 	goya_tpc_mbist_workaround(hdev);
2738 
2739 	goya_init_golden_registers(hdev);
2740 
2741 	/*
2742 	 * After CPU initialization is finished, change DDR bar mapping inside
2743 	 * iATU to point to the start address of the MMU page tables
2744 	 */
2745 	if (goya_set_ddr_bar_base(hdev, (MMU_PAGE_TABLES_ADDR &
2746 			~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
2747 		dev_err(hdev->dev,
2748 			"failed to map DDR bar to MMU page tables\n");
2749 		return -EIO;
2750 	}
2751 
2752 	rc = goya_mmu_init(hdev);
2753 	if (rc)
2754 		return rc;
2755 
2756 	goya_init_security(hdev);
2757 
2758 	goya_init_dma_qmans(hdev);
2759 
2760 	goya_init_mme_qmans(hdev);
2761 
2762 	goya_init_tpc_qmans(hdev);
2763 
2764 	goya_enable_timestamp(hdev);
2765 
2766 	/* MSI-X must be enabled before CPU queues are initialized */
2767 	rc = goya_enable_msix(hdev);
2768 	if (rc)
2769 		goto disable_queues;
2770 
2771 	/* Perform read from the device to flush all MSI-X configuration */
2772 	RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2773 
2774 	return 0;
2775 
2776 disable_queues:
2777 	goya_disable_internal_queues(hdev);
2778 	goya_disable_external_queues(hdev);
2779 
2780 	return rc;
2781 }
2782 
2783 static int goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
2784 {
2785 	struct goya_device *goya = hdev->asic_specific;
2786 	u32 reset_timeout_ms, cpu_timeout_ms, status;
2787 
2788 	if (hdev->pldm) {
2789 		reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
2790 		cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2791 	} else {
2792 		reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
2793 		cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
2794 	}
2795 
2796 	if (hard_reset) {
2797 		/* The state of the CPU is unknown at this point, so make sure
2798 		 * it is stopped by any means necessary
2799 		 */
2800 		WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
2801 		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2802 			GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
2803 
2804 		msleep(cpu_timeout_ms);
2805 
2806 		goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2807 		goya_disable_clk_rlx(hdev);
2808 		goya_set_pll_refclk(hdev);
2809 
2810 		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
2811 		dev_dbg(hdev->dev,
2812 			"Issued HARD reset command, going to wait %dms\n",
2813 			reset_timeout_ms);
2814 	} else {
2815 		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
2816 		dev_dbg(hdev->dev,
2817 			"Issued SOFT reset command, going to wait %dms\n",
2818 			reset_timeout_ms);
2819 	}
2820 
2821 	/*
2822 	 * After hard reset, we can't poll the BTM_FSM register because the PSOC
2823 	 * itself is in reset. In either reset we need to wait until the reset
2824 	 * is deasserted
2825 	 */
2826 	msleep(reset_timeout_ms);
2827 
2828 	status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
2829 	if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK) {
2830 		dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", status);
2831 		return -ETIMEDOUT;
2832 	}
2833 
2834 	if (!hard_reset && goya) {
2835 		goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
2836 						HW_CAP_GOLDEN | HW_CAP_TPC);
2837 		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2838 				GOYA_ASYNC_EVENT_ID_SOFT_RESET);
2839 		return 0;
2840 	}
2841 
2842 	/* Chicken bit to re-initiate boot sequencer flow */
2843 	WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
2844 		1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
2845 	/* Move boot manager FSM to pre boot sequencer init state */
2846 	WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
2847 			0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
2848 
2849 	if (goya) {
2850 		goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
2851 				HW_CAP_DDR_0 | HW_CAP_DDR_1 |
2852 				HW_CAP_DMA | HW_CAP_MME |
2853 				HW_CAP_MMU | HW_CAP_TPC_MBIST |
2854 				HW_CAP_GOLDEN | HW_CAP_TPC);
2855 
2856 		memset(goya->events_stat, 0, sizeof(goya->events_stat));
2857 	}
2858 	return 0;
2859 }
2860 
2861 int goya_suspend(struct hl_device *hdev)
2862 {
2863 	return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
2864 }
2865 
2866 int goya_resume(struct hl_device *hdev)
2867 {
2868 	return goya_init_iatu(hdev);
2869 }
2870 
2871 static int goya_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2872 			void *cpu_addr, dma_addr_t dma_addr, size_t size)
2873 {
2874 	int rc;
2875 
2876 	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2877 			VM_DONTCOPY | VM_NORESERVE);
2878 
2879 	rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
2880 				(dma_addr - HOST_PHYS_BASE), size);
2881 	if (rc)
2882 		dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
2883 
2884 	return rc;
2885 }
2886 
2887 void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2888 {
2889 	u32 db_reg_offset, db_value;
2890 
2891 	switch (hw_queue_id) {
2892 	case GOYA_QUEUE_ID_DMA_0:
2893 		db_reg_offset = mmDMA_QM_0_PQ_PI;
2894 		break;
2895 
2896 	case GOYA_QUEUE_ID_DMA_1:
2897 		db_reg_offset = mmDMA_QM_1_PQ_PI;
2898 		break;
2899 
2900 	case GOYA_QUEUE_ID_DMA_2:
2901 		db_reg_offset = mmDMA_QM_2_PQ_PI;
2902 		break;
2903 
2904 	case GOYA_QUEUE_ID_DMA_3:
2905 		db_reg_offset = mmDMA_QM_3_PQ_PI;
2906 		break;
2907 
2908 	case GOYA_QUEUE_ID_DMA_4:
2909 		db_reg_offset = mmDMA_QM_4_PQ_PI;
2910 		break;
2911 
2912 	case GOYA_QUEUE_ID_CPU_PQ:
2913 		db_reg_offset = mmCPU_IF_PF_PQ_PI;
2914 		break;
2915 
2916 	case GOYA_QUEUE_ID_MME:
2917 		db_reg_offset = mmMME_QM_PQ_PI;
2918 		break;
2919 
2920 	case GOYA_QUEUE_ID_TPC0:
2921 		db_reg_offset = mmTPC0_QM_PQ_PI;
2922 		break;
2923 
2924 	case GOYA_QUEUE_ID_TPC1:
2925 		db_reg_offset = mmTPC1_QM_PQ_PI;
2926 		break;
2927 
2928 	case GOYA_QUEUE_ID_TPC2:
2929 		db_reg_offset = mmTPC2_QM_PQ_PI;
2930 		break;
2931 
2932 	case GOYA_QUEUE_ID_TPC3:
2933 		db_reg_offset = mmTPC3_QM_PQ_PI;
2934 		break;
2935 
2936 	case GOYA_QUEUE_ID_TPC4:
2937 		db_reg_offset = mmTPC4_QM_PQ_PI;
2938 		break;
2939 
2940 	case GOYA_QUEUE_ID_TPC5:
2941 		db_reg_offset = mmTPC5_QM_PQ_PI;
2942 		break;
2943 
2944 	case GOYA_QUEUE_ID_TPC6:
2945 		db_reg_offset = mmTPC6_QM_PQ_PI;
2946 		break;
2947 
2948 	case GOYA_QUEUE_ID_TPC7:
2949 		db_reg_offset = mmTPC7_QM_PQ_PI;
2950 		break;
2951 
2952 	default:
2953 		/* Should never get here */
2954 		dev_err(hdev->dev, "H/W queue %d is invalid. Can't set pi\n",
2955 			hw_queue_id);
2956 		return;
2957 	}
2958 
2959 	db_value = pi;
2960 
2961 	/* ring the doorbell */
2962 	WREG32(db_reg_offset, db_value);
2963 
2964 	if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ) {
2965 		/* make sure device CPU will read latest data from host */
2966 		mb();
2967 		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2968 				GOYA_ASYNC_EVENT_ID_PI_UPDATE);
2969 	}
2970 }
2971 
2972 void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
2973 {
2974 	/* The QMANs are on the SRAM, so we need to copy to I/O space */
2975 	memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
2976 }
2977 
2978 static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
2979 					dma_addr_t *dma_handle, gfp_t flags)
2980 {
2981 	void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
2982 						dma_handle, flags);
2983 
2984 	/* Shift to the device's base physical address of host memory */
2985 	if (kernel_addr)
2986 		*dma_handle += HOST_PHYS_BASE;
2987 
2988 	return kernel_addr;
2989 }
2990 
2991 static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
2992 					void *cpu_addr, dma_addr_t dma_handle)
2993 {
2994 	/* Cancel the device's base physical address of host memory */
2995 	dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
2996 
2997 	dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
2998 }
2999 
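/*
 * Editor's note: a minimal usage sketch (hypothetical, not from the driver)
 * showing the round-trip invariant of the pair above: the returned handle is
 * already shifted by HOST_PHYS_BASE, is programmed into the device as-is,
 * and must be passed back shifted so the free path can undo the offset.
 * SZ_4K is assumed available via the kernel headers.
 */
#if 0
static void goya_dma_coherent_usage_sketch(struct hl_device *hdev)
{
	dma_addr_t dev_addr;
	void *buf;

	buf = goya_dma_alloc_coherent(hdev, SZ_4K, &dev_addr, GFP_KERNEL);
	if (buf) {
		/* dev_addr is a device-side address, usable in descriptors */
		goya_dma_free_coherent(hdev, SZ_4K, buf, dev_addr);
	}
}
#endif
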
3000 int goya_scrub_device_mem(struct hl_device *hdev)
3001 {
3002 	return 0;
3003 }
3004 
3005 void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
3006 				dma_addr_t *dma_handle,	u16 *queue_len)
3007 {
3008 	void *base;
3009 	u32 offset;
3010 
3011 	*dma_handle = hdev->asic_prop.sram_base_address;
3012 
3013 	base = (__force void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
3014 
3015 	switch (queue_id) {
3016 	case GOYA_QUEUE_ID_MME:
3017 		offset = MME_QMAN_BASE_OFFSET;
3018 		*queue_len = MME_QMAN_LENGTH;
3019 		break;
3020 	case GOYA_QUEUE_ID_TPC0:
3021 		offset = TPC0_QMAN_BASE_OFFSET;
3022 		*queue_len = TPC_QMAN_LENGTH;
3023 		break;
3024 	case GOYA_QUEUE_ID_TPC1:
3025 		offset = TPC1_QMAN_BASE_OFFSET;
3026 		*queue_len = TPC_QMAN_LENGTH;
3027 		break;
3028 	case GOYA_QUEUE_ID_TPC2:
3029 		offset = TPC2_QMAN_BASE_OFFSET;
3030 		*queue_len = TPC_QMAN_LENGTH;
3031 		break;
3032 	case GOYA_QUEUE_ID_TPC3:
3033 		offset = TPC3_QMAN_BASE_OFFSET;
3034 		*queue_len = TPC_QMAN_LENGTH;
3035 		break;
3036 	case GOYA_QUEUE_ID_TPC4:
3037 		offset = TPC4_QMAN_BASE_OFFSET;
3038 		*queue_len = TPC_QMAN_LENGTH;
3039 		break;
3040 	case GOYA_QUEUE_ID_TPC5:
3041 		offset = TPC5_QMAN_BASE_OFFSET;
3042 		*queue_len = TPC_QMAN_LENGTH;
3043 		break;
3044 	case GOYA_QUEUE_ID_TPC6:
3045 		offset = TPC6_QMAN_BASE_OFFSET;
3046 		*queue_len = TPC_QMAN_LENGTH;
3047 		break;
3048 	case GOYA_QUEUE_ID_TPC7:
3049 		offset = TPC7_QMAN_BASE_OFFSET;
3050 		*queue_len = TPC_QMAN_LENGTH;
3051 		break;
3052 	default:
3053 		dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
3054 		return NULL;
3055 	}
3056 
3057 	base += offset;
3058 	*dma_handle += offset;
3059 
3060 	return base;
3061 }
3062 
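/*
 * Editor's note: driver jobs on QMAN0 are synchronized with a fence. A
 * MSG_PROT packet at the end of the patched CB writes GOYA_QMAN0_FENCE_VAL
 * to a host buffer, which the driver polls until the value lands or the
 * timeout expires.
 */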
3063 static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
3064 {
3065 	struct packet_msg_prot *fence_pkt;
3066 	u32 *fence_ptr;
3067 	dma_addr_t fence_dma_addr;
3068 	struct hl_cb *cb;
3069 	u32 tmp, timeout;
3070 	int rc;
3071 
3072 	if (hdev->pldm)
3073 		timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
3074 	else
3075 		timeout = HL_DEVICE_TIMEOUT_USEC;
3076 
3077 	if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
3078 		dev_err_ratelimited(hdev->dev,
3079 			"Can't send driver job on QMAN0 because the device is not idle\n");
3080 		return -EBUSY;
3081 	}
3082 
3083 	fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
3084 	if (!fence_ptr) {
3085 		dev_err(hdev->dev,
3086 			"Failed to allocate fence memory for QMAN0\n");
3087 		return -ENOMEM;
3088 	}
3089 
3090 	goya_qman0_set_security(hdev, true);
3091 
3092 	cb = job->patched_cb;
3093 
3094 	fence_pkt = cb->kernel_address +
3095 			job->job_cb_size - sizeof(struct packet_msg_prot);
3096 
3097 	tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3098 			(1 << GOYA_PKT_CTL_EB_SHIFT) |
3099 			(1 << GOYA_PKT_CTL_MB_SHIFT);
3100 	fence_pkt->ctl = cpu_to_le32(tmp);
3101 	fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
3102 	fence_pkt->addr = cpu_to_le64(fence_dma_addr);
3103 
3104 	rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
3105 					job->job_cb_size, cb->bus_address);
3106 	if (rc) {
3107 		dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
3108 		goto free_fence_ptr;
3109 	}
3110 
3111 	rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
3112 				(tmp == GOYA_QMAN0_FENCE_VAL), 1000,
3113 				timeout, true);
3114 
3115 	hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
3116 
3117 	if (rc == -ETIMEDOUT) {
3118 		dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
3119 		goto free_fence_ptr;
3120 	}
3121 
3122 free_fence_ptr:
3123 	hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
3124 
3125 	goya_qman0_set_security(hdev, false);
3126 
3127 	return rc;
3128 }
3129 
3130 int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
3131 				u32 timeout, u64 *result)
3132 {
3133 	struct goya_device *goya = hdev->asic_specific;
3134 
3135 	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
3136 		if (result)
3137 			*result = 0;
3138 		return 0;
3139 	}
3140 
3141 	if (!timeout)
3142 		timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC;
3143 
3144 	return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
3145 					timeout, result);
3146 }
3147 
3148 int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
3149 {
3150 	struct packet_msg_prot *fence_pkt;
3151 	dma_addr_t pkt_dma_addr;
3152 	u32 fence_val, tmp;
3153 	dma_addr_t fence_dma_addr;
3154 	u32 *fence_ptr;
3155 	int rc;
3156 
3157 	fence_val = GOYA_QMAN0_FENCE_VAL;
3158 
3159 	fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
3160 	if (!fence_ptr) {
3161 		dev_err(hdev->dev,
3162 			"Failed to allocate memory for H/W queue %d testing\n",
3163 			hw_queue_id);
3164 		return -ENOMEM;
3165 	}
3166 
3167 	*fence_ptr = 0;
3168 
3169 	fence_pkt = hl_asic_dma_pool_zalloc(hdev, sizeof(struct packet_msg_prot), GFP_KERNEL,
3170 						&pkt_dma_addr);
3171 	if (!fence_pkt) {
3172 		dev_err(hdev->dev,
3173 			"Failed to allocate packet for H/W queue %d testing\n",
3174 			hw_queue_id);
3175 		rc = -ENOMEM;
3176 		goto free_fence_ptr;
3177 	}
3178 
3179 	tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3180 			(1 << GOYA_PKT_CTL_EB_SHIFT) |
3181 			(1 << GOYA_PKT_CTL_MB_SHIFT);
3182 	fence_pkt->ctl = cpu_to_le32(tmp);
3183 	fence_pkt->value = cpu_to_le32(fence_val);
3184 	fence_pkt->addr = cpu_to_le64(fence_dma_addr);
3185 
3186 	rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
3187 					sizeof(struct packet_msg_prot),
3188 					pkt_dma_addr);
3189 	if (rc) {
3190 		dev_err(hdev->dev,
3191 			"Failed to send fence packet to H/W queue %d\n",
3192 			hw_queue_id);
3193 		goto free_pkt;
3194 	}
3195 
3196 	rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
3197 					1000, GOYA_TEST_QUEUE_WAIT_USEC, true);
3198 
3199 	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
3200 
3201 	if (rc == -ETIMEDOUT) {
3202 		dev_err(hdev->dev,
3203 			"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
3204 			hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
3205 		rc = -EIO;
3206 	}
3207 
3208 free_pkt:
3209 	hl_asic_dma_pool_free(hdev, (void *) fence_pkt, pkt_dma_addr);
3210 free_fence_ptr:
3211 	hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
3212 	return rc;
3213 }
3214 
3215 int goya_test_cpu_queue(struct hl_device *hdev)
3216 {
3217 	struct goya_device *goya = hdev->asic_specific;
3218 
3219 	/*
3220 	 * Check the capability here because send_cpu_message() won't update
3221 	 * the result value when the capability is not set
3222 	 */
3223 	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
3224 		return 0;
3225 
3226 	return hl_fw_test_cpu_queue(hdev);
3227 }
3228 
3229 int goya_test_queues(struct hl_device *hdev)
3230 {
3231 	int i, rc, ret_val = 0;
3232 
3233 	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
3234 		rc = goya_test_queue(hdev, i);
3235 		if (rc)
3236 			ret_val = -EINVAL;
3237 	}
3238 
3239 	return ret_val;
3240 }
3241 
3242 static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
3243 					gfp_t mem_flags, dma_addr_t *dma_handle)
3244 {
3245 	void *kernel_addr;
3246 
3247 	if (size > GOYA_DMA_POOL_BLK_SIZE)
3248 		return NULL;
3249 
3250 	kernel_addr =  dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
3251 
3252 	/* Shift to the device's base physical address of host memory */
3253 	if (kernel_addr)
3254 		*dma_handle += HOST_PHYS_BASE;
3255 
3256 	return kernel_addr;
3257 }
3258 
3259 static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
3260 				dma_addr_t dma_addr)
3261 {
3262 	/* Cancel the device's base physical address of host memory */
3263 	dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
3264 
3265 	dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
3266 }
3267 
3268 void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
3269 					dma_addr_t *dma_handle)
3270 {
3271 	void *vaddr;
3272 
3273 	vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
3274 	*dma_handle = (*dma_handle) - hdev->cpu_accessible_dma_address +
3275 			VA_CPU_ACCESSIBLE_MEM_ADDR;
3276 
3277 	return vaddr;
3278 }
3279 
3280 void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
3281 					void *vaddr)
3282 {
3283 	hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
3284 }
3285 
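/*
 * Editor's note, a worked example for the coalescing below: two DMA segments
 * where the second starts exactly at addr + len are merged into a single
 * descriptor as long as the combined length does not exceed
 * DMA_MAX_TRANSFER_SIZE; a gap or an oversized merge starts a new descriptor.
 */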
3286 u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
3287 {
3288 	struct scatterlist *sg, *sg_next_iter;
3289 	u32 count, dma_desc_cnt;
3290 	u64 len, len_next;
3291 	dma_addr_t addr, addr_next;
3292 
3293 	dma_desc_cnt = 0;
3294 
3295 	for_each_sgtable_dma_sg(sgt, sg, count) {
3296 		len = sg_dma_len(sg);
3297 		addr = sg_dma_address(sg);
3298 
3299 		if (len == 0)
3300 			break;
3301 
3302 		while ((count + 1) < sgt->nents) {
3303 			sg_next_iter = sg_next(sg);
3304 			len_next = sg_dma_len(sg_next_iter);
3305 			addr_next = sg_dma_address(sg_next_iter);
3306 
3307 			if (len_next == 0)
3308 				break;
3309 
3310 			if ((addr + len == addr_next) &&
3311 				(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3312 				len += len_next;
3313 				count++;
3314 				sg = sg_next_iter;
3315 			} else {
3316 				break;
3317 			}
3318 		}
3319 
3320 		dma_desc_cnt++;
3321 	}
3322 
3323 	return dma_desc_cnt * sizeof(struct packet_lin_dma);
3324 }
3325 
3326 static int goya_pin_memory_before_cs(struct hl_device *hdev,
3327 				struct hl_cs_parser *parser,
3328 				struct packet_lin_dma *user_dma_pkt,
3329 				u64 addr, enum dma_data_direction dir)
3330 {
3331 	struct hl_userptr *userptr;
3332 	int rc;
3333 
3334 	if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3335 			parser->job_userptr_list, &userptr))
3336 		goto already_pinned;
3337 
3338 	userptr = kzalloc_obj(*userptr);
3339 	if (!userptr)
3340 		return -ENOMEM;
3341 
3342 	rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3343 				userptr);
3344 	if (rc)
3345 		goto free_userptr;
3346 
3347 	list_add_tail(&userptr->job_node, parser->job_userptr_list);
3348 
3349 	rc = hl_dma_map_sgtable(hdev, userptr->sgt, dir);
3350 	if (rc) {
3351 		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
3352 		goto unpin_memory;
3353 	}
3354 
3355 	userptr->dma_mapped = true;
3356 	userptr->dir = dir;
3357 
3358 already_pinned:
3359 	parser->patched_cb_size +=
3360 			goya_get_dma_desc_list_size(hdev, userptr->sgt);
3361 
3362 	return 0;
3363 
3364 unpin_memory:
3365 	list_del(&userptr->job_node);
3366 	hl_unpin_host_memory(hdev, userptr);
3367 free_userptr:
3368 	kfree(userptr);
3369 	return rc;
3370 }
3371 
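/*
 * Validate a user LIN_DMA packet that has the host on one side. The DMA
 * direction and memset flag are extracted from the packet's ctl word with
 * the usual mask-and-shift idiom, e.g.:
 *
 *	ctl = le32_to_cpu(user_dma_pkt->ctl);
 *	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
 *			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
 *
 * The device-side address must fall entirely inside the user-visible SRAM
 * or DRAM range, and the host memory is pinned unless the packet is a
 * memset (in which case src_addr carries a value, not a host address).
 */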
static int goya_validate_dma_pkt_host(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt)
{
	u64 device_memory_addr, addr;
	enum dma_data_direction dir;
	enum hl_goya_dma_direction user_dir;
	bool sram_addr = true;
	bool skip_host_mem_pin = false;
	bool user_memset;
	u32 ctl;
	int rc = 0;

	ctl = le32_to_cpu(user_dma_pkt->ctl);

	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;

	user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;

	switch (user_dir) {
	case HL_DMA_HOST_TO_DRAM:
		dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
		dir = DMA_TO_DEVICE;
		sram_addr = false;
		addr = le64_to_cpu(user_dma_pkt->src_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
		if (user_memset)
			skip_host_mem_pin = true;
		break;

	case HL_DMA_DRAM_TO_HOST:
		dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
		dir = DMA_FROM_DEVICE;
		sram_addr = false;
		addr = le64_to_cpu(user_dma_pkt->dst_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
		break;

	case HL_DMA_HOST_TO_SRAM:
		dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
		dir = DMA_TO_DEVICE;
		addr = le64_to_cpu(user_dma_pkt->src_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
		if (user_memset)
			skip_host_mem_pin = true;
		break;

	case HL_DMA_SRAM_TO_HOST:
		dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
		dir = DMA_FROM_DEVICE;
		addr = le64_to_cpu(user_dma_pkt->dst_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
		break;
	default:
		dev_err(hdev->dev, "DMA direction %d is unsupported/undefined\n", user_dir);
		return -EFAULT;
	}

	if (sram_addr) {
		if (!hl_mem_area_inside_range(device_memory_addr,
				le32_to_cpu(user_dma_pkt->tsize),
				hdev->asic_prop.sram_user_base_address,
				hdev->asic_prop.sram_end_address)) {

			dev_err(hdev->dev,
				"SRAM address 0x%llx + 0x%x is invalid\n",
				device_memory_addr,
				user_dma_pkt->tsize);
			return -EFAULT;
		}
	} else {
		if (!hl_mem_area_inside_range(device_memory_addr,
				le32_to_cpu(user_dma_pkt->tsize),
				hdev->asic_prop.dram_user_base_address,
				hdev->asic_prop.dram_end_address)) {

			dev_err(hdev->dev,
				"DRAM address 0x%llx + 0x%x is invalid\n",
				device_memory_addr,
				user_dma_pkt->tsize);
			return -EFAULT;
		}
	}

	if (skip_host_mem_pin)
		parser->patched_cb_size += sizeof(*user_dma_pkt);
	else {
		if ((dir == DMA_TO_DEVICE) &&
				(parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
			dev_err(hdev->dev,
				"Can't DMA from host on queue other than 1\n");
			return -EFAULT;
		}

		rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
						addr, dir);
	}

	return rc;
}

static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt)
{
	u64 sram_memory_addr, dram_memory_addr;
	enum hl_goya_dma_direction user_dir;
	u32 ctl;

	ctl = le32_to_cpu(user_dma_pkt->ctl);
	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;

	if (user_dir == HL_DMA_DRAM_TO_SRAM) {
		dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
		dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
		sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
	} else {
		dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
		sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
		dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
	}

	if (!hl_mem_area_inside_range(sram_memory_addr,
				le32_to_cpu(user_dma_pkt->tsize),
				hdev->asic_prop.sram_user_base_address,
				hdev->asic_prop.sram_end_address)) {
		dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
			sram_memory_addr, user_dma_pkt->tsize);
		return -EFAULT;
	}

	if (!hl_mem_area_inside_range(dram_memory_addr,
				le32_to_cpu(user_dma_pkt->tsize),
				hdev->asic_prop.dram_user_base_address,
				hdev->asic_prop.dram_end_address)) {
		dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
			dram_memory_addr, user_dma_pkt->tsize);
		return -EFAULT;
	}

	parser->patched_cb_size += sizeof(*user_dma_pkt);

	return 0;
}

static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt)
{
	enum hl_goya_dma_direction user_dir;
	u32 ctl;
	int rc;

	dev_dbg(hdev->dev, "DMA packet details:\n");
	dev_dbg(hdev->dev, "source == 0x%llx\n",
		le64_to_cpu(user_dma_pkt->src_addr));
	dev_dbg(hdev->dev, "destination == 0x%llx\n",
		le64_to_cpu(user_dma_pkt->dst_addr));
	dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));

	ctl = le32_to_cpu(user_dma_pkt->ctl);
	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;

	/*
	 * Special handling for DMA with size 0. The H/W has a bug where
	 * this can cause the QMAN DMA to get stuck, so block it here.
	 */
	if (user_dma_pkt->tsize == 0) {
		dev_err(hdev->dev,
			"Got DMA with size 0, might reset the device\n");
		return -EINVAL;
	}

	if ((user_dir == HL_DMA_DRAM_TO_SRAM) || (user_dir == HL_DMA_SRAM_TO_DRAM))
		rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
	else
		rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);

	return rc;
}

static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt)
{
	dev_dbg(hdev->dev, "DMA packet details:\n");
	dev_dbg(hdev->dev, "source == 0x%llx\n",
		le64_to_cpu(user_dma_pkt->src_addr));
	dev_dbg(hdev->dev, "destination == 0x%llx\n",
		le64_to_cpu(user_dma_pkt->dst_addr));
	dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));

	/*
	 * WA for HW-23.
	 * We can't allow user to read from Host using QMANs other than 1.
	 * PMMU and HPMMU addresses are equal, check only one of them.
	 */
	if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
		hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
				le32_to_cpu(user_dma_pkt->tsize),
				hdev->asic_prop.pmmu.start_addr,
				hdev->asic_prop.pmmu.end_addr)) {
		dev_err(hdev->dev,
			"Can't DMA from host on queue other than 1\n");
		return -EFAULT;
	}

	if (user_dma_pkt->tsize == 0) {
		dev_err(hdev->dev,
			"Got DMA with size 0, might reset the device\n");
		return -EINVAL;
	}

	parser->patched_cb_size += sizeof(*user_dma_pkt);

	return 0;
}

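/*
 * A user WREG32 may only target the DMA channel 0 WR_COMP_ADDR_LO register,
 * and without the MMU the written value must also land inside the
 * sync-object window (SOB 0..1023), so a write-completion cannot be aimed
 * at an arbitrary configuration register.
 */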
static int goya_validate_wreg32(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_wreg32 *wreg_pkt)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 sob_start_addr, sob_end_addr;
	u16 reg_offset;

	reg_offset = le32_to_cpu(wreg_pkt->ctl) &
			GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;

	dev_dbg(hdev->dev, "WREG32 packet details:\n");
	dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
	dev_dbg(hdev->dev, "value      == 0x%x\n",
		le32_to_cpu(wreg_pkt->value));

	if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
		dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
			reg_offset);
		return -EPERM;
	}

	/*
	 * With MMU, DMA channels are not secured, so it doesn't matter where
	 * the WR COMP will be written to because it will go out with
	 * non-secured property
	 */
	if (goya->hw_cap_initialized & HW_CAP_MMU)
		return 0;

	sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);

	if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
			(le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {

		dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
			wreg_pkt->value);
		return -EPERM;
	}

	return 0;
}

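/*
 * First pass over a user CB: walk it packet by packet, reject privileged
 * opcodes (WREG_BULK, MSG_PROT, CP_DMA, STOP) and accumulate the size the
 * patched CB will need. Each packet id is pulled out of the 64-bit packet
 * header with PACKET_HEADER_PACKET_ID_MASK/_SHIFT and indexes
 * goya_packet_sizes[] to advance through the buffer.
 */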
static int goya_validate_cb(struct hl_device *hdev,
			struct hl_cs_parser *parser, bool is_mmu)
{
	u32 cb_parsed_length = 0;
	int rc = 0;

	parser->patched_cb_size = 0;

	/* user_cb_size is more than 0, so the loop will always be executed */
	while (cb_parsed_length < parser->user_cb_size) {
		enum packet_id pkt_id;
		u16 pkt_size;
		struct goya_packet *user_pkt;

		user_pkt = parser->user_cb->kernel_address + cb_parsed_length;

		pkt_id = (enum packet_id) (
				(le64_to_cpu(user_pkt->header) &
				PACKET_HEADER_PACKET_ID_MASK) >>
					PACKET_HEADER_PACKET_ID_SHIFT);

		if (!validate_packet_id(pkt_id)) {
			dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		pkt_size = goya_packet_sizes[pkt_id];
		cb_parsed_length += pkt_size;
		if (cb_parsed_length > parser->user_cb_size) {
			dev_err(hdev->dev,
				"packet 0x%x is out of CB boundary\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		switch (pkt_id) {
		case PACKET_WREG_32:
			/*
			 * Although it is validated after copy in patch_cb(),
			 * need to validate here as well because patch_cb() is
			 * not called in MMU path while this function is called
			 */
			rc = goya_validate_wreg32(hdev,
				parser, (struct packet_wreg32 *) user_pkt);
			parser->patched_cb_size += pkt_size;
			break;

		case PACKET_WREG_BULK:
			dev_err(hdev->dev,
				"User not allowed to use WREG_BULK\n");
			rc = -EPERM;
			break;

		case PACKET_MSG_PROT:
			dev_err(hdev->dev,
				"User not allowed to use MSG_PROT\n");
			rc = -EPERM;
			break;

		case PACKET_CP_DMA:
			dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
			rc = -EPERM;
			break;

		case PACKET_STOP:
			dev_err(hdev->dev, "User not allowed to use STOP\n");
			rc = -EPERM;
			break;

		case PACKET_LIN_DMA:
			if (is_mmu)
				rc = goya_validate_dma_pkt_mmu(hdev, parser,
					(struct packet_lin_dma *) user_pkt);
			else
				rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
					(struct packet_lin_dma *) user_pkt);
			break;

		case PACKET_MSG_LONG:
		case PACKET_MSG_SHORT:
		case PACKET_FENCE:
		case PACKET_NOP:
			parser->patched_cb_size += pkt_size;
			break;

		default:
			dev_err(hdev->dev, "Invalid packet header 0x%x\n",
				pkt_id);
			rc = -EINVAL;
			break;
		}

		if (rc)
			break;
	}

	/*
	 * The new CB should have space at the end for two MSG_PROT packets:
	 * 1. A packet that will act as a completion packet
	 * 2. A packet that will generate MSI-X interrupt
	 */
	parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;

	return rc;
}

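/*
 * Expand a single user LIN_DMA packet into one descriptor per merged
 * scatter-gather run of its pinned host buffer. The EB (engine barrier) bit
 * is kept only on the first descriptor, and the user's RDCOMP/WRCOMP bits
 * are restored only on the last one, so a completion is signalled exactly
 * where the user asked for it. DRAM<->SRAM packets and zero-sized packets
 * are copied through unchanged (zero-sized ones were already rejected
 * during validation).
 */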
static int goya_patch_dma_packet(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt,
				struct packet_lin_dma *new_dma_pkt,
				u32 *new_dma_pkt_size)
{
	struct hl_userptr *userptr;
	struct scatterlist *sg, *sg_next_iter;
	u32 count, dma_desc_cnt;
	u64 len, len_next;
	dma_addr_t dma_addr, dma_addr_next;
	enum hl_goya_dma_direction user_dir;
	u64 device_memory_addr, addr;
	enum dma_data_direction dir;
	struct sg_table *sgt;
	bool skip_host_mem_pin = false;
	bool user_memset;
	u32 user_rdcomp_mask, user_wrcomp_mask, ctl;

	ctl = le32_to_cpu(user_dma_pkt->ctl);

	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;

	user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;

	if ((user_dir == HL_DMA_DRAM_TO_SRAM) || (user_dir == HL_DMA_SRAM_TO_DRAM) ||
			(user_dma_pkt->tsize == 0)) {
		memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
		*new_dma_pkt_size = sizeof(*new_dma_pkt);
		return 0;
	}

	if ((user_dir == HL_DMA_HOST_TO_DRAM) || (user_dir == HL_DMA_HOST_TO_SRAM)) {
		addr = le64_to_cpu(user_dma_pkt->src_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
		dir = DMA_TO_DEVICE;
		if (user_memset)
			skip_host_mem_pin = true;
	} else {
		addr = le64_to_cpu(user_dma_pkt->dst_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
		dir = DMA_FROM_DEVICE;
	}

	if ((!skip_host_mem_pin) &&
		(hl_userptr_is_pinned(hdev, addr,
			le32_to_cpu(user_dma_pkt->tsize),
			parser->job_userptr_list, &userptr) == false)) {
		dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
				addr, user_dma_pkt->tsize);
		return -EFAULT;
	}

	if ((user_memset) && (dir == DMA_TO_DEVICE)) {
		memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
		*new_dma_pkt_size = sizeof(*user_dma_pkt);
		return 0;
	}

	user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;

	user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;

	sgt = userptr->sgt;
	dma_desc_cnt = 0;

	for_each_sgtable_dma_sg(sgt, sg, count) {
		len = sg_dma_len(sg);
		dma_addr = sg_dma_address(sg);

		if (len == 0)
			break;

		while ((count + 1) < sgt->nents) {
			sg_next_iter = sg_next(sg);
			len_next = sg_dma_len(sg_next_iter);
			dma_addr_next = sg_dma_address(sg_next_iter);

			if (len_next == 0)
				break;

			if ((dma_addr + len == dma_addr_next) &&
				(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
				len += len_next;
				count++;
				sg = sg_next_iter;
			} else {
				break;
			}
		}

		ctl = le32_to_cpu(user_dma_pkt->ctl);
		if (likely(dma_desc_cnt))
			ctl &= ~GOYA_PKT_CTL_EB_MASK;
		ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
				GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
		new_dma_pkt->ctl = cpu_to_le32(ctl);
		new_dma_pkt->tsize = cpu_to_le32((u32) len);

		if (dir == DMA_TO_DEVICE) {
			new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
			new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
		} else {
			new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
			new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
		}

		if (!user_memset)
			device_memory_addr += len;
		dma_desc_cnt++;
		new_dma_pkt++;
	}

	if (!dma_desc_cnt) {
		dev_err(hdev->dev,
			"Error of 0 SG entries when patching DMA packet\n");
		return -EFAULT;
	}

	/* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
	new_dma_pkt--;
	new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);

	*new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);

	return 0;
}

static int goya_patch_cb(struct hl_device *hdev,
				struct hl_cs_parser *parser)
{
	u32 cb_parsed_length = 0;
	u32 cb_patched_cur_length = 0;
	int rc = 0;

	/* user_cb_size is more than 0, so the loop will always be executed */
	while (cb_parsed_length < parser->user_cb_size) {
		enum packet_id pkt_id;
		u16 pkt_size;
		u32 new_pkt_size = 0;
		struct goya_packet *user_pkt, *kernel_pkt;

		user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
		kernel_pkt = parser->patched_cb->kernel_address +
					cb_patched_cur_length;

		pkt_id = (enum packet_id) (
				(le64_to_cpu(user_pkt->header) &
				PACKET_HEADER_PACKET_ID_MASK) >>
					PACKET_HEADER_PACKET_ID_SHIFT);

		if (!validate_packet_id(pkt_id)) {
			dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		pkt_size = goya_packet_sizes[pkt_id];
		cb_parsed_length += pkt_size;
		if (cb_parsed_length > parser->user_cb_size) {
			dev_err(hdev->dev,
				"packet 0x%x is out of CB boundary\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		switch (pkt_id) {
		case PACKET_LIN_DMA:
			rc = goya_patch_dma_packet(hdev, parser,
					(struct packet_lin_dma *) user_pkt,
					(struct packet_lin_dma *) kernel_pkt,
					&new_pkt_size);
			cb_patched_cur_length += new_pkt_size;
			break;

		case PACKET_WREG_32:
			memcpy(kernel_pkt, user_pkt, pkt_size);
			cb_patched_cur_length += pkt_size;
			rc = goya_validate_wreg32(hdev, parser,
					(struct packet_wreg32 *) kernel_pkt);
			break;

		case PACKET_WREG_BULK:
			dev_err(hdev->dev,
				"User not allowed to use WREG_BULK\n");
			rc = -EPERM;
			break;

		case PACKET_MSG_PROT:
			dev_err(hdev->dev,
				"User not allowed to use MSG_PROT\n");
			rc = -EPERM;
			break;

		case PACKET_CP_DMA:
			dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
			rc = -EPERM;
			break;

		case PACKET_STOP:
			dev_err(hdev->dev, "User not allowed to use STOP\n");
			rc = -EPERM;
			break;

		case PACKET_MSG_LONG:
		case PACKET_MSG_SHORT:
		case PACKET_FENCE:
		case PACKET_NOP:
			memcpy(kernel_pkt, user_pkt, pkt_size);
			cb_patched_cur_length += pkt_size;
			break;

		default:
			dev_err(hdev->dev, "Invalid packet header 0x%x\n",
				pkt_id);
			rc = -EINVAL;
			break;
		}

		if (rc)
			break;
	}

	return rc;
}

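/*
 * In the MMU flow the user CB is copied verbatim into a kernel-owned
 * patched CB (plus room for the two trailing MSG_PROT packets) and the
 * copy is validated in place; no per-packet patching is needed because
 * host addresses go through the MMU. The handle-reference dance below is
 * deliberate: hl_cb_get() takes a reference that the completed job will
 * drop, while hl_cb_destroy() only removes the handle from the idr.
 */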
static int goya_parse_cb_mmu(struct hl_device *hdev,
		struct hl_cs_parser *parser)
{
	u64 handle;
	u32 patched_cb_size;
	struct hl_cb *user_cb;
	int rc;

	/*
	 * The new CB should have space at the end for two MSG_PROT pkt:
	 * 1. A packet that will act as a completion packet
	 * 2. A packet that will generate MSI-X interrupt
	 */
	parser->patched_cb_size = parser->user_cb_size +
			sizeof(struct packet_msg_prot) * 2;

	rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
				parser->patched_cb_size, false, false,
				&handle);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate patched CB for DMA CS %d\n",
			rc);
		return rc;
	}

	parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
	/* hl_cb_get should never fail here */
	if (!parser->patched_cb) {
		dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
		rc = -EFAULT;
		goto out;
	}

	/*
	 * The check that parser->user_cb_size <= parser->user_cb->size was done
	 * in validate_queue_index().
	 */
	memcpy(parser->patched_cb->kernel_address,
		parser->user_cb->kernel_address,
		parser->user_cb_size);

	patched_cb_size = parser->patched_cb_size;

	/* validate patched CB instead of user CB */
	user_cb = parser->user_cb;
	parser->user_cb = parser->patched_cb;
	rc = goya_validate_cb(hdev, parser, true);
	parser->user_cb = user_cb;

	if (rc) {
		hl_cb_put(parser->patched_cb);
		goto out;
	}

	if (patched_cb_size != parser->patched_cb_size) {
		dev_err(hdev->dev, "user CB size mismatch\n");
		hl_cb_put(parser->patched_cb);
		rc = -EINVAL;
		goto out;
	}

out:
	/*
	 * Always call cb destroy here because we still hold a reference
	 * to it from the cb_get earlier. After the job completes, cb_put
	 * will release it; here we only want to remove it from the idr.
	 */
	hl_cb_destroy(&hdev->kernel_mem_mgr, handle);

	return rc;
}

static int goya_parse_cb_no_mmu(struct hl_device *hdev,
				struct hl_cs_parser *parser)
{
	u64 handle;
	int rc;

	rc = goya_validate_cb(hdev, parser, false);

	if (rc)
		goto free_userptr;

	rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
				parser->patched_cb_size, false, false,
				&handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate patched CB for DMA CS %d\n", rc);
		goto free_userptr;
	}

	parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
	/* hl_cb_get should never fail here */
	if (!parser->patched_cb) {
		dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
		rc = -EFAULT;
		goto out;
	}

	rc = goya_patch_cb(hdev, parser);

	if (rc)
		hl_cb_put(parser->patched_cb);

out:
	/*
	 * Always call cb destroy here because we still hold a reference
	 * to it from the cb_get earlier. After the job completes, cb_put
	 * will release it; here we only want to remove it from the idr.
	 */
	hl_cb_destroy(&hdev->kernel_mem_mgr, handle);

free_userptr:
	if (rc)
		hl_userptr_delete_list(hdev, parser->job_userptr_list);
	return rc;
}

static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
					struct hl_cs_parser *parser)
{
	struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;

	if (goya->hw_cap_initialized & HW_CAP_MMU)
		return 0;

	/* For internal queue jobs, just check if CB address is valid */
	if (hl_mem_area_inside_range(
			(u64) (uintptr_t) parser->user_cb,
			parser->user_cb_size,
			asic_prop->sram_user_base_address,
			asic_prop->sram_end_address))
		return 0;

	if (hl_mem_area_inside_range(
			(u64) (uintptr_t) parser->user_cb,
			parser->user_cb_size,
			asic_prop->dram_user_base_address,
			asic_prop->dram_end_address))
		return 0;

	dev_err(hdev->dev,
		"Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n",
		parser->user_cb, parser->user_cb_size);

	return -EFAULT;
}

int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
{
	struct goya_device *goya = hdev->asic_specific;

	if (parser->queue_type == QUEUE_TYPE_INT)
		return goya_parse_cb_no_ext_queue(hdev, parser);

	if (goya->hw_cap_initialized & HW_CAP_MMU)
		return goya_parse_cb_mmu(hdev, parser);
	else
		return goya_parse_cb_no_mmu(hdev, parser);
}

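/*
 * Fill the two MSG_PROT packets that every patched CB reserved at its end:
 * the first writes cq_val to the completion queue address (with engine and
 * message barriers set), the second writes the MSI-X vector number to the
 * doorbell register to raise the interrupt. Layout at the CB tail:
 *
 *	[ ... user packets ... ][ MSG_PROT: CQ update ][ MSG_PROT: MSI-X ]
 */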
void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
				u32 len, u32 original_len, u64 cq_addr, u32 cq_val,
				u32 msix_vec, bool eb)
{
	struct packet_msg_prot *cq_pkt;
	u32 tmp;

	cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);

	tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
			(1 << GOYA_PKT_CTL_EB_SHIFT) |
			(1 << GOYA_PKT_CTL_MB_SHIFT);
	cq_pkt->ctl = cpu_to_le32(tmp);
	cq_pkt->value = cpu_to_le32(cq_val);
	cq_pkt->addr = cpu_to_le64(cq_addr);

	cq_pkt++;

	tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
			(1 << GOYA_PKT_CTL_MB_SHIFT);
	cq_pkt->ctl = cpu_to_le32(tmp);
	cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
	cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
}

void goya_update_eq_ci(struct hl_device *hdev, u32 val)
{
	WREG32(mmCPU_EQ_CI, val);
}

void goya_restore_phase_topology(struct hl_device *hdev)
{

}

static void goya_clear_sm_regs(struct hl_device *hdev)
{
	int i, num_of_sob_in_longs, num_of_mon_in_longs;

	num_of_sob_in_longs =
		((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);

	num_of_mon_in_longs =
		((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);

	for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
		WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);

	for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
		WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);

	/* Flush all WREG to prevent race */
	i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
}

static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, void *blob_addr)
{
	dev_err(hdev->dev, "Reading via DMA is not implemented yet\n");
	return -EPERM;
}

static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
{
	struct goya_device *goya = hdev->asic_specific;

	if (hdev->reset_info.hard_reset_pending)
		return U64_MAX;

	return readq(hdev->pcie_bar[DDR_BAR_ID] +
			(addr - goya->ddr_bar_cur_addr));
}

static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
{
	struct goya_device *goya = hdev->asic_specific;

	if (hdev->reset_info.hard_reset_pending)
		return;

	writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
			(addr - goya->ddr_bar_cur_addr));
}

static const char *_goya_get_event_desc(u16 event_type)
{
	switch (event_type) {
	case GOYA_ASYNC_EVENT_ID_PCIE_IF:
		return "PCIe_if";
	case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
		return "TPC%d_ecc";
	case GOYA_ASYNC_EVENT_ID_MME_ECC:
		return "MME_ecc";
	case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
		return "MME_ecc_ext";
	case GOYA_ASYNC_EVENT_ID_MMU_ECC:
		return "MMU_ecc";
	case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
		return "DMA_macro";
	case GOYA_ASYNC_EVENT_ID_DMA_ECC:
		return "DMA_ecc";
	case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
		return "CPU_if_ecc";
	case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
		return "PSOC_mem";
	case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
		return "PSOC_coresight";
	case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
		return "SRAM%d";
	case GOYA_ASYNC_EVENT_ID_GIC500:
		return "GIC500";
	case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
		return "PLL%d";
	case GOYA_ASYNC_EVENT_ID_AXI_ECC:
		return "AXI_ecc";
	case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
		return "L2_ram_ecc";
	case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
		return "PSOC_gpio_05_sw_reset";
	case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
		return "PSOC_gpio_10_vrhot_icrit";
	case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
		return "PCIe_dec";
	case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
		return "TPC%d_dec";
	case GOYA_ASYNC_EVENT_ID_MME_WACS:
		return "MME_wacs";
	case GOYA_ASYNC_EVENT_ID_MME_WACSD:
		return "MME_wacsd";
	case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
		return "CPU_axi_splitter";
	case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
		return "PSOC_axi_dec";
	case GOYA_ASYNC_EVENT_ID_PSOC:
		return "PSOC";
	case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
		return "TPC%d_krn_err";
	case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
		return "TPC%d_cq";
	case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
		return "TPC%d_qm";
	case GOYA_ASYNC_EVENT_ID_MME_QM:
		return "MME_qm";
	case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
		return "MME_cq";
	case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
		return "DMA%d_qm";
	case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
		return "DMA%d_ch";
	case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
		return "TPC%d_bmon_spmu";
	case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
		return "DMA_bm_ch%d";
	case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
		return "POWER_ENV_S";
	case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
		return "POWER_ENV_E";
	case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
		return "THERMAL_ENV_S";
	case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
		return "THERMAL_ENV_E";
	case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
		return "QUEUE_OUT_OF_SYNC";
	default:
		return "N/A";
	}
}

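/*
 * Expand the printf-style templates returned by _goya_get_event_desc()
 * ("TPC%d_ecc", "DMA%d_qm", ...) with the engine index recovered from the
 * event id. The divisors match the event-id stride per engine: TPC ECC ids
 * are 3 apart, KRN_ERR and BMON_SPMU ids are 10 apart, while SRAM, PLL,
 * CMDQ, QM and DMA ids are consecutive. For example,
 * GOYA_ASYNC_EVENT_ID_TPC2_ECC yields index (TPC2_ECC - TPC0_ECC) / 3 == 2
 * and is printed as "TPC2_ecc".
 */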
static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
{
	u8 index;

	switch (event_type) {
	case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
		index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_ECC) / 3;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
		index = event_type - GOYA_ASYNC_EVENT_ID_SRAM0;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
		index = event_type - GOYA_ASYNC_EVENT_ID_PLL0;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
		index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
		index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
		index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
		index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
		index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
		index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
		index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU) / 10;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
		index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
		snprintf(desc, size, _goya_get_event_desc(event_type));
		break;
	default:
		snprintf(desc, size, _goya_get_event_desc(event_type));
		break;
	}
}

static void goya_print_razwi_info(struct hl_device *hdev)
{
	if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
		dev_err_ratelimited(hdev->dev, "Illegal write to LBW\n");
		WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
	}

	if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
		dev_err_ratelimited(hdev->dev, "Illegal read from LBW\n");
		WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
	}

	if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
		dev_err_ratelimited(hdev->dev, "Illegal write to HBW\n");
		WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
	}

	if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
		dev_err_ratelimited(hdev->dev, "Illegal read from HBW\n");
		WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
	}
}

static void goya_print_mmu_error_info(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u64 addr;
	u32 val;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
	if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
		addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
		addr <<= 32;
		addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);

		dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
					addr);

		WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
	}
}

static void goya_print_out_of_sync_info(struct hl_device *hdev,
					struct cpucp_pkt_sync_err *sync_err)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];

	dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
		le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
}

static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
				bool razwi)
{
	char desc[20] = "";

	goya_get_event_desc(event_type, desc, sizeof(desc));
	dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
		event_type, desc);

	if (razwi) {
		goya_print_razwi_info(hdev);
		goya_print_mmu_error_info(hdev);
	}
}

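/*
 * Ask the device CPU (CPU-CP) to unmask a whole array of IRQs in one
 * message. The payload must be padded to an 8-byte boundary for CPU-CP to
 * copy it, hence the round-up below; e.g. a packet of 0x2D bytes is sent
 * as 0x30. The IRQ numbers are converted to little-endian on the way in,
 * since that is what the device expects.
 */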
static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
		size_t irq_arr_size)
{
	struct cpucp_unmask_irq_arr_packet *pkt;
	size_t total_pkt_size;
	u64 result;
	int rc;
	int irq_num_entries, irq_arr_index;
	__le32 *goya_irq_arr;

	total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
			irq_arr_size;

	/* data should be aligned to 8 bytes so that CPU-CP can copy it */
	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;

	/* total_pkt_size is cast to u16 later on */
	if (total_pkt_size > USHRT_MAX) {
		dev_err(hdev->dev, "too many elements in IRQ array\n");
		return -EINVAL;
	}

	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
	pkt->length = cpu_to_le32(irq_num_entries);

	/* We must perform any necessary endianness conversion on the irq
	 * array being passed to the goya hardware
	 */
	for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
			irq_arr_index < irq_num_entries ; irq_arr_index++)
		goya_irq_arr[irq_arr_index] =
				cpu_to_le32(irq_arr[irq_arr_index]);

	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
						CPUCP_PKT_CTL_OPCODE_SHIFT);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
						total_pkt_size, 0, &result);

	if (rc)
		dev_err(hdev->dev, "failed to unmask IRQ array\n");

	kfree(pkt);

	return rc;
}

static int goya_compute_reset_late_init(struct hl_device *hdev)
{
	/*
	 * Unmask all IRQs since some could have been received
	 * during the soft reset
	 */
	return goya_unmask_irq_arr(hdev, goya_all_events,
					sizeof(goya_all_events));
}

static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
{
	struct cpucp_packet pkt;
	u64 result;
	int rc;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.value = cpu_to_le64(event_type);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, &result);

	if (rc)
		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);

	return rc;
}

static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
{
	ktime_t zero_time = ktime_set(0, 0);

	mutex_lock(&hdev->clk_throttling.lock);

	switch (event_type) {
	case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
		hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER;
		hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
		hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
		hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
		dev_info_ratelimited(hdev->dev,
			"Clock throttling due to power consumption\n");
		break;

	case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
		hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
		hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
		dev_info_ratelimited(hdev->dev,
			"Power envelope is safe, back to optimal clock\n");
		break;

	case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
		hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL;
		hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
		hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
		hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
		dev_info_ratelimited(hdev->dev,
			"Clock throttling due to overheating\n");
		break;

	case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
		hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
		hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
		dev_info_ratelimited(hdev->dev,
			"Thermal envelope is safe, back to optimal clock\n");
		break;

	default:
		dev_err(hdev->dev, "Received invalid clock change event %d\n",
			event_type);
		break;
	}

	mutex_unlock(&hdev->clk_throttling.lock);
}

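/*
 * Event-queue entry handler. The event type is extracted from the entry's
 * ctl word (EQ_CTL_EVENT_TYPE_MASK/_SHIFT) and dispatched: fatal errors
 * such as ECC events trigger a hard reset when hard_reset_on_fw_events is
 * set, while recoverable RAZWI-style errors are logged and their IRQ is
 * unmasked again so the next occurrence is also reported.
 */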
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
	u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
	u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
				>> EQ_CTL_EVENT_TYPE_SHIFT);
	struct goya_device *goya = hdev->asic_specific;

	if (event_type >= GOYA_ASYNC_EVENT_ID_SIZE) {
		dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
				event_type, GOYA_ASYNC_EVENT_ID_SIZE - 1);
		return;
	}

	goya->events_stat[event_type]++;
	goya->events_stat_aggregate[event_type]++;

	switch (event_type) {
	case GOYA_ASYNC_EVENT_ID_PCIE_IF:
	case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
	case GOYA_ASYNC_EVENT_ID_MME_ECC:
	case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
	case GOYA_ASYNC_EVENT_ID_MMU_ECC:
	case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
	case GOYA_ASYNC_EVENT_ID_DMA_ECC:
	case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
	case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
	case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
	case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
	case GOYA_ASYNC_EVENT_ID_GIC500:
	case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
	case GOYA_ASYNC_EVENT_ID_AXI_ECC:
	case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
		goya_print_irq_info(hdev, event_type, false);
		if (hdev->hard_reset_on_fw_events)
			hl_device_reset(hdev, (HL_DRV_RESET_HARD |
						HL_DRV_RESET_FW_FATAL_ERR));
		break;

	case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
		goya_print_irq_info(hdev, event_type, false);
		if (hdev->hard_reset_on_fw_events)
			hl_device_reset(hdev, HL_DRV_RESET_HARD);
		break;

	case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
	case GOYA_ASYNC_EVENT_ID_MME_WACS:
	case GOYA_ASYNC_EVENT_ID_MME_WACSD:
	case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
	case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
	case GOYA_ASYNC_EVENT_ID_PSOC:
	case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
	case GOYA_ASYNC_EVENT_ID_MME_QM:
	case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
	case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
	case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
		goya_print_irq_info(hdev, event_type, true);
		goya_unmask_irq(hdev, event_type);
		break;

	case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
	case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
		goya_print_irq_info(hdev, event_type, false);
		goya_unmask_irq(hdev, event_type);
		break;

	case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
	case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
	case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
	case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
		goya_print_clk_change_info(hdev, event_type);
		goya_unmask_irq(hdev, event_type);
		break;

	case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
		goya_print_irq_info(hdev, event_type, false);
		goya_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
		if (hdev->hard_reset_on_fw_events)
			hl_device_reset(hdev, HL_DRV_RESET_HARD);
		else
			hl_fw_unmask_irq(hdev, event_type);
		break;

	default:
		dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
				event_type);
		break;
	}
}

void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
{
	struct goya_device *goya = hdev->asic_specific;

	if (aggregate) {
		*size = (u32) sizeof(goya->events_stat_aggregate);
		return goya->events_stat_aggregate;
	}

	*size = (u32) sizeof(goya->events_stat);
	return goya->events_stat;
}

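/*
 * Fill a device memory range with a 64-bit pattern using memset-mode
 * LIN_DMA packets on QMAN 0. Each packet covers at most 2GB, so the range
 * is split into DIV_ROUND_UP_ULL(size, SZ_2G) packets; e.g. a 5GB range
 * becomes three packets of 2GB, 2GB and 1GB. The packets are built in a
 * kernel CB and submitted as a single job.
 */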
static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
				u64 val, bool is_dram)
{
	struct packet_lin_dma *lin_dma_pkt;
	struct hl_cs_job *job;
	u32 cb_size, ctl;
	struct hl_cb *cb;
	int rc, lin_dma_pkts_cnt;

	lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
	cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
						sizeof(struct packet_msg_prot);
	cb = hl_cb_kernel_create(hdev, cb_size, false);
	if (!cb)
		return -ENOMEM;

	lin_dma_pkt = cb->kernel_address;

	do {
		memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));

		ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
				(1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
				(1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
				(1 << GOYA_PKT_CTL_RB_SHIFT) |
				(1 << GOYA_PKT_CTL_MB_SHIFT));
		ctl |= (is_dram ? HL_DMA_HOST_TO_DRAM : HL_DMA_HOST_TO_SRAM) <<
				GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
		lin_dma_pkt->ctl = cpu_to_le32(ctl);

		lin_dma_pkt->src_addr = cpu_to_le64(val);
		lin_dma_pkt->dst_addr = cpu_to_le64(addr);
		if (lin_dma_pkts_cnt > 1)
			lin_dma_pkt->tsize = cpu_to_le32(SZ_2G);
		else
			lin_dma_pkt->tsize = cpu_to_le32(size);

		size -= SZ_2G;
		addr += SZ_2G;
		lin_dma_pkt++;
	} while (--lin_dma_pkts_cnt);

	job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
	if (!job) {
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		rc = -ENOMEM;
		goto release_cb;
	}

	job->id = 0;
	job->user_cb = cb;
	atomic_inc(&job->user_cb->cs_cnt);
	job->user_cb_size = cb_size;
	job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
	job->patched_cb = job->user_cb;
	job->job_cb_size = job->user_cb_size;

	hl_debugfs_add_job(hdev, job);

	rc = goya_send_job_on_qman0(hdev, job);

	hl_debugfs_remove_job(hdev, job);
	kfree(job);
	atomic_dec(&cb->cs_cnt);

release_cb:
	hl_cb_put(cb);
	hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);

	return rc;
}

int goya_context_switch(struct hl_device *hdev, u32 asid)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 addr = prop->sram_base_address, sob_addr;
	u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
	u64 val = 0x7777777777777777ull;
	int rc, dma_id;
	u32 channel_off = mmDMA_CH_1_WR_COMP_ADDR_LO -
					mmDMA_CH_0_WR_COMP_ADDR_LO;

	rc = goya_memset_device_memory(hdev, addr, size, val, false);
	if (rc) {
		dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
		return rc;
	}

	/* we need to reset registers that the user is allowed to change */
	sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
	WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO, lower_32_bits(sob_addr));

	for (dma_id = 1 ; dma_id < NUMBER_OF_EXT_HW_QUEUES ; dma_id++) {
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
							(dma_id - 1) * 4;
		WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + channel_off * dma_id,
						lower_32_bits(sob_addr));
	}

	WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);

	goya_clear_sm_regs(hdev);

	return 0;
}

static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	u64 addr = prop->mmu_pgt_addr;
	u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
			MMU_CACHE_MNG_SIZE;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return 0;

	return goya_memset_device_memory(hdev, addr, size, 0, true);
}

static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
	u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
	u64 val = 0x9999999999999999ull;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return 0;

	return goya_memset_device_memory(hdev, addr, size, val, true);
}

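/*
 * Create the kernel-context MMU mappings the device CPU relies on: an
 * identity mapping of the FW image region in DRAM (2MB pages) and a
 * mapping of the CPU-accessible DMA region at VA_CPU_ACCESSIBLE_MEM_ADDR.
 * The latter uses a single 2MB page when the DMA address happens to be
 * 2MB aligned, and falls back to 4KB pages otherwise. The unwind paths
 * walk the offsets backwards to unmap exactly what was mapped.
 */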
goya_mmu_add_mappings_for_device_cpu(struct hl_device * hdev)4886 static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
4887 {
4888 	struct asic_fixed_properties *prop = &hdev->asic_prop;
4889 	struct goya_device *goya = hdev->asic_specific;
4890 	s64 off, cpu_off;
4891 	int rc;
4892 
4893 	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4894 		return 0;
4895 
4896 	for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
4897 		rc = hl_mmu_map_page(hdev->kernel_ctx,
4898 			prop->dram_base_address + off,
4899 			prop->dram_base_address + off, PAGE_SIZE_2MB,
4900 			(off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE);
4901 		if (rc) {
4902 			dev_err(hdev->dev, "Map failed for address 0x%llx\n",
4903 				prop->dram_base_address + off);
4904 			goto unmap;
4905 		}
4906 	}
4907 
4908 	if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
4909 		rc = hl_mmu_map_page(hdev->kernel_ctx,
4910 			VA_CPU_ACCESSIBLE_MEM_ADDR,
4911 			hdev->cpu_accessible_dma_address,
4912 			PAGE_SIZE_2MB, true);
4913 
4914 		if (rc) {
4915 			dev_err(hdev->dev,
4916 				"Map failed for CPU accessible memory\n");
4917 			off -= PAGE_SIZE_2MB;
4918 			goto unmap;
4919 		}
4920 	} else {
4921 		for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) {
4922 			rc = hl_mmu_map_page(hdev->kernel_ctx,
4923 				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
4924 				hdev->cpu_accessible_dma_address + cpu_off,
4925 				PAGE_SIZE_4KB, true);
4926 			if (rc) {
4927 				dev_err(hdev->dev,
4928 					"Map failed for CPU accessible memory\n");
4929 				cpu_off -= PAGE_SIZE_4KB;
4930 				goto unmap_cpu;
4931 			}
4932 		}
4933 	}
4934 
4935 	goya_mmu_prepare_reg(hdev, mmCPU_IF_ARUSER_OVR, HL_KERNEL_ASID_ID);
4936 	goya_mmu_prepare_reg(hdev, mmCPU_IF_AWUSER_OVR, HL_KERNEL_ASID_ID);
4937 	WREG32(mmCPU_IF_ARUSER_OVR_EN, 0x7FF);
4938 	WREG32(mmCPU_IF_AWUSER_OVR_EN, 0x7FF);
4939 
4940 	/* Make sure configuration is flushed to device */
4941 	RREG32(mmCPU_IF_AWUSER_OVR_EN);
4942 
4943 	goya->device_cpu_mmu_mappings_done = true;
4944 
4945 	return 0;
4946 
4947 unmap_cpu:
4948 	for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
4949 		if (hl_mmu_unmap_page(hdev->kernel_ctx,
4950 				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
4951 				PAGE_SIZE_4KB, true))
4952 			dev_warn_ratelimited(hdev->dev,
4953 				"failed to unmap address 0x%llx\n",
4954 				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
4955 unmap:
4956 	for (; off >= 0 ; off -= PAGE_SIZE_2MB)
4957 		if (hl_mmu_unmap_page(hdev->kernel_ctx,
4958 				prop->dram_base_address + off, PAGE_SIZE_2MB,
4959 				true))
4960 			dev_warn_ratelimited(hdev->dev,
4961 				"failed to unmap address 0x%llx\n",
4962 				prop->dram_base_address + off);
4963 
4964 	return rc;
4965 }
4966 
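/*
 * Undo goya_mmu_add_mappings_for_device_cpu(): disable the CPU_IF
 * ARUSER/AWUSER overrides, then unmap the CPU-accessible region and the
 * CPU F/W image, mirroring the page sizes used when mapping. Unmap
 * failures are only logged - there is nothing more to roll back.
 */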
4967 void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
4968 {
4969 	struct asic_fixed_properties *prop = &hdev->asic_prop;
4970 	struct goya_device *goya = hdev->asic_specific;
4971 	u32 off, cpu_off;
4972 
4973 	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4974 		return;
4975 
4976 	if (!goya->device_cpu_mmu_mappings_done)
4977 		return;
4978 
4979 	WREG32(mmCPU_IF_ARUSER_OVR_EN, 0);
4980 	WREG32(mmCPU_IF_AWUSER_OVR_EN, 0);
4981 
4982 	if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
4983 		if (hl_mmu_unmap_page(hdev->kernel_ctx,
4984 				VA_CPU_ACCESSIBLE_MEM_ADDR,
4985 				PAGE_SIZE_2MB, true))
4986 			dev_warn(hdev->dev,
4987 				"Failed to unmap CPU accessible memory\n");
4988 	} else {
4989 		for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
4990 			if (hl_mmu_unmap_page(hdev->kernel_ctx,
4991 					VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
4992 					PAGE_SIZE_4KB,
4993 					(cpu_off + PAGE_SIZE_4KB) >= SZ_2M))
4994 				dev_warn_ratelimited(hdev->dev,
4995 					"failed to unmap address 0x%llx\n",
4996 					VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
4997 	}
4998 
4999 	for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
5000 		if (hl_mmu_unmap_page(hdev->kernel_ctx,
5001 				prop->dram_base_address + off, PAGE_SIZE_2MB,
5002 				(off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE))
5003 			dev_warn_ratelimited(hdev->dev,
5004 					"Failed to unmap address 0x%llx\n",
5005 					prop->dram_base_address + off);
5006 
5007 	goya->device_cpu_mmu_mappings_done = false;
5008 }
5009 
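/*
 * Bind @asid to all engines by writing it (with the MMBP bit cleared)
 * into each of the GOYA_MMU_REGS_NUM secure-props registers listed in
 * goya_mmu_regs[]. ASIDs wider than the register field are rejected.
 */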
5010 static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
5011 {
5012 	struct goya_device *goya = hdev->asic_specific;
5013 	int i;
5014 
5015 	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
5016 		return;
5017 
5018 	if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
5019 		dev_crit(hdev->dev, "asid %u is too big\n", asid);
5020 		return;
5021 	}
5022 
5023 	/* zero the MMBP and ASID bits and then set the ASID */
5024 	for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
5025 		goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
5026 }
5027 
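/*
 * Full (L0 + L1) MMU cache invalidation: kick mmSTLB_INV_ALL_START and
 * poll until the HW clears it, with a longer timeout on Palladium.
 * L1-only (!is_hard) requests are ignored, as are calls made while a
 * hard reset is pending.
 */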
5028 static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
5029 					u32 flags)
5030 {
5031 	struct goya_device *goya = hdev->asic_specific;
5032 	u32 status, timeout_usec;
5033 	int rc;
5034 
5035 	if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
5036 		hdev->reset_info.hard_reset_pending)
5037 		return 0;
5038 
5039 	/* no need for L1-only invalidation in Goya */
5040 	if (!is_hard)
5041 		return 0;
5042 
5043 	if (hdev->pldm)
5044 		timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5045 	else
5046 		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5047 
5048 	/* L0 & L1 invalidation */
5049 	WREG32(mmSTLB_INV_ALL_START, 1);
5050 
5051 	rc = hl_poll_timeout(
5052 		hdev,
5053 		mmSTLB_INV_ALL_START,
5054 		status,
5055 		!status,
5056 		1000,
5057 		timeout_usec);
5058 
5059 	return rc;
5060 }
5061 
5062 static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
5063 						bool is_hard, u32 flags,
5064 						u32 asid, u64 va, u64 size)
5065 {
5066 	/* Treat as invalidate all because there is no range invalidation
5067 	 * in Goya
5068 	 */
5069 	return hl_mmu_invalidate_cache(hdev, is_hard, flags);
5070 }
5071 
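/* Forward to the common F/W heartbeat, but only once the CPU queue is up */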
5072 int goya_send_heartbeat(struct hl_device *hdev)
5073 {
5074 	struct goya_device *goya = hdev->asic_specific;
5075 
5076 	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5077 		return 0;
5078 
5079 	return hl_fw_send_heartbeat(hdev);
5080 }
5081 
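/*
 * Handshake with the CpuCP F/W and sanitize its report: a DRAM size
 * that is not a power of 2 or is smaller than DRAM_PHYS_DEFAULT_SIZE
 * falls back to the default, and an empty card name falls back to
 * GOYA_DEFAULT_CARD_NAME.
 */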
5082 int goya_cpucp_info_get(struct hl_device *hdev)
5083 {
5084 	struct goya_device *goya = hdev->asic_specific;
5085 	struct asic_fixed_properties *prop = &hdev->asic_prop;
5086 	u64 dram_size;
5087 	int rc;
5088 
5089 	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5090 		return 0;
5091 
5092 	rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0,
5093 					mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
5094 					mmCPU_BOOT_ERR1);
5095 	if (rc)
5096 		return rc;
5097 
5098 	dram_size = le64_to_cpu(prop->cpucp_info.dram_size);
5099 	if (dram_size) {
5100 		if ((!is_power_of_2(dram_size)) ||
5101 				(dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
5102 			dev_err(hdev->dev,
5103 				"F/W reported invalid DRAM size %llu. Trying to use default size\n",
5104 				dram_size);
5105 			dram_size = DRAM_PHYS_DEFAULT_SIZE;
5106 		}
5107 
5108 		prop->dram_size = dram_size;
5109 		prop->dram_end_address = prop->dram_base_address + dram_size;
5110 	}
5111 
5112 	if (!strlen(prop->cpucp_info.card_name))
5113 		strscpy_pad(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
5114 				CARD_NAME_MAX_LEN);
5115 
5116 	return 0;
5117 }
5118 
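/*
 * Check all DMA, TPC and MME engines for idleness via their QM/CMDQ
 * global status and core/config status registers. Busy engines are set
 * in @mask_arr (when provided) and a per-engine table is printed to @e.
 */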
5119 static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
5120 				struct engines_data *e)
5121 {
5122 	const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
5123 	const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
5124 	unsigned long *mask = (unsigned long *)mask_arr;
5125 	u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts,
5126 		mme_arch_sts;
5127 	bool is_idle = true, is_eng_idle;
5128 	u64 offset;
5129 	int i;
5130 
5131 	if (e)
5132 		hl_engine_data_sprintf(e, "\nDMA  is_idle  QM_GLBL_STS0  DMA_CORE_STS0\n"
5133 					"---  -------  ------------  -------------\n");
5134 
5135 	offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
5136 
5137 	for (i = 0 ; i < DMA_MAX_NUM ; i++) {
5138 		qm_glbl_sts0 = RREG32(mmDMA_QM_0_GLBL_STS0 + i * offset);
5139 		dma_core_sts0 = RREG32(mmDMA_CH_0_STS0 + i * offset);
5140 		is_eng_idle = IS_DMA_QM_IDLE(qm_glbl_sts0) &&
5141 				IS_DMA_IDLE(dma_core_sts0);
5142 		is_idle &= is_eng_idle;
5143 
5144 		if (mask && !is_eng_idle)
5145 			set_bit(GOYA_ENGINE_ID_DMA_0 + i, mask);
5146 		if (e)
5147 			hl_engine_data_sprintf(e, dma_fmt, i, is_eng_idle ? "Y" : "N",
5148 					qm_glbl_sts0, dma_core_sts0);
5149 	}
5150 
5151 	if (e)
5152 		hl_engine_data_sprintf(e,
5153 			"\nTPC  is_idle  QM_GLBL_STS0  CMDQ_GLBL_STS0  CFG_STATUS\n"
5154 			"---  -------  ------------  --------------  ----------\n");
5155 
5156 	offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
5157 
5158 	for (i = 0 ; i < TPC_MAX_NUM ; i++) {
5159 		qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + i * offset);
5160 		cmdq_glbl_sts0 = RREG32(mmTPC0_CMDQ_GLBL_STS0 + i * offset);
5161 		tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + i * offset);
5162 		is_eng_idle = IS_TPC_QM_IDLE(qm_glbl_sts0) &&
5163 				IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) &&
5164 				IS_TPC_IDLE(tpc_cfg_sts);
5165 		is_idle &= is_eng_idle;
5166 
5167 		if (mask && !is_eng_idle)
5168 			set_bit(GOYA_ENGINE_ID_TPC_0 + i, mask);
5169 		if (e)
5170 			hl_engine_data_sprintf(e, fmt, i, is_eng_idle ? "Y" : "N",
5171 				qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
5172 	}
5173 
5174 	if (e)
5175 		hl_engine_data_sprintf(e,
5176 			"\nMME  is_idle  QM_GLBL_STS0  CMDQ_GLBL_STS0  ARCH_STATUS\n"
5177 			"---  -------  ------------  --------------  -----------\n");
5178 
5179 	qm_glbl_sts0 = RREG32(mmMME_QM_GLBL_STS0);
5180 	cmdq_glbl_sts0 = RREG32(mmMME_CMDQ_GLBL_STS0);
5181 	mme_arch_sts = RREG32(mmMME_ARCH_STATUS);
5182 	is_eng_idle = IS_MME_QM_IDLE(qm_glbl_sts0) &&
5183 			IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) &&
5184 			IS_MME_IDLE(mme_arch_sts);
5185 	is_idle &= is_eng_idle;
5186 
5187 	if (mask && !is_eng_idle)
5188 		set_bit(GOYA_ENGINE_ID_MME_0, mask);
5189 	if (e) {
5190 		hl_engine_data_sprintf(e, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
5191 				cmdq_glbl_sts0, mme_arch_sts);
5192 		hl_engine_data_sprintf(e, "\n");
5193 	}
5194 
5195 	return is_idle;
5196 }
5197 
5198 static void goya_hw_queues_lock(struct hl_device *hdev)
5199 	__acquires(&goya->hw_queues_lock)
5200 {
5201 	struct goya_device *goya = hdev->asic_specific;
5202 
5203 	spin_lock(&goya->hw_queues_lock);
5204 }
5205 
5206 static void goya_hw_queues_unlock(struct hl_device *hdev)
5207 	__releases(&goya->hw_queues_lock)
5208 {
5209 	struct goya_device *goya = hdev->asic_specific;
5210 
5211 	spin_unlock(&goya->hw_queues_lock);
5212 }
5213 
5214 static u32 goya_get_pci_id(struct hl_device *hdev)
5215 {
5216 	return hdev->pdev->device;
5217 }
5218 
5219 static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
5220 				size_t max_size)
5221 {
5222 	struct goya_device *goya = hdev->asic_specific;
5223 
5224 	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5225 		return 0;
5226 
5227 	return hl_fw_get_eeprom_data(hdev, data, max_size);
5228 }
5229 
5230 static void goya_cpu_init_scrambler_dram(struct hl_device *hdev)
5231 {
5232 
5233 }
5234 
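/* Only user contexts need their ASID programmed into the MMU registers */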
5235 static int goya_ctx_init(struct hl_ctx *ctx)
5236 {
5237 	if (ctx->asid != HL_KERNEL_ASID_ID)
5238 		goya_mmu_prepare(ctx->hdev, ctx->asid);
5239 
5240 	return 0;
5241 }
5242 
5243 static int goya_pre_schedule_cs(struct hl_cs *cs)
5244 {
5245 	return 0;
5246 }
5247 
5248 u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
5249 {
5250 	return cq_idx;
5251 }
5252 
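/*
 * Goya has no sync-stream support, so the signal/wait CB callbacks
 * below are stubs that report zero-sized CBs and generate nothing.
 */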
5253 static u32 goya_get_signal_cb_size(struct hl_device *hdev)
5254 {
5255 	return 0;
5256 }
5257 
5258 static u32 goya_get_wait_cb_size(struct hl_device *hdev)
5259 {
5260 	return 0;
5261 }
5262 
5263 static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
5264 				u32 size, bool eb)
5265 {
5266 	return 0;
5267 }
5268 
5269 static u32 goya_gen_wait_cb(struct hl_device *hdev,
5270 		struct hl_gen_wait_properties *prop)
5271 {
5272 	return 0;
5273 }
5274 
5275 static void goya_reset_sob(struct hl_device *hdev, void *data)
5276 {
5277 
5278 }
5279 
5280 static void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group)
5281 {
5282 
5283 }
5284 
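/*
 * Read the free-running 64-bit PSOC timestamp counter, upper half
 * first. Note there is no retry loop, so a lower-half wraparound
 * between the two reads can, in principle, yield a stale combination.
 */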
5285 u64 goya_get_device_time(struct hl_device *hdev)
5286 {
5287 	u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
5288 
5289 	return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
5290 }
5291 
5292 static int goya_collective_wait_init_cs(struct hl_cs *cs)
5293 {
5294 	return 0;
5295 }
5296 
5297 static int goya_collective_wait_create_jobs(struct hl_device *hdev,
5298 		struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
5299 		u32 collective_engine_id, u32 encaps_signal_offset)
5300 {
5301 	return -EINVAL;
5302 }
5303 
5304 static void goya_ctx_fini(struct hl_ctx *ctx)
5305 {
5306 
5307 }
5308 
5309 static int goya_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
5310 			u32 *block_size, u32 *block_id)
5311 {
5312 	return -EPERM;
5313 }
5314 
5315 static int goya_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
5316 				u32 block_id, u32 block_size)
5317 {
5318 	return -EPERM;
5319 }
5320 
5321 static void goya_enable_events_from_fw(struct hl_device *hdev)
5322 {
5323 	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
5324 			GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
5325 }
5326 
5327 static int goya_ack_mmu_page_fault_or_access_error(struct hl_device *hdev, u64 mmu_cap_mask)
5328 {
5329 	return -EINVAL;
5330 }
5331 
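/*
 * Translate the ABI-visible HL_GOYA_*_PLL indices to the F/W internal
 * PLL numbering; unknown indices are rejected with -EINVAL.
 */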
5332 static int goya_map_pll_idx_to_fw_idx(u32 pll_idx)
5333 {
5334 	switch (pll_idx) {
5335 	case HL_GOYA_CPU_PLL: return CPU_PLL;
5336 	case HL_GOYA_PCI_PLL: return PCI_PLL;
5337 	case HL_GOYA_MME_PLL: return MME_PLL;
5338 	case HL_GOYA_TPC_PLL: return TPC_PLL;
5339 	case HL_GOYA_IC_PLL: return IC_PLL;
5340 	case HL_GOYA_MC_PLL: return MC_PLL;
5341 	case HL_GOYA_EMMC_PLL: return EMMC_PLL;
5342 	default: return -EINVAL;
5343 	}
5344 }
5345 
5346 static int goya_gen_sync_to_engine_map(struct hl_device *hdev,
5347 				struct hl_sync_to_engine_map *map)
5348 {
5349 	/* Not implemented */
5350 	return 0;
5351 }
5352 
5353 static int goya_monitor_valid(struct hl_mon_state_dump *mon)
5354 {
5355 	/* Not implemented */
5356 	return 0;
5357 }
5358 
5359 static int goya_print_single_monitor(char **buf, size_t *size, size_t *offset,
5360 				struct hl_device *hdev,
5361 				struct hl_mon_state_dump *mon)
5362 {
5363 	/* Not implemented */
5364 	return 0;
5365 }
5366 
5367 
5368 static int goya_print_fences_single_engine(
5369 	struct hl_device *hdev, u64 base_offset, u64 status_base_offset,
5370 	enum hl_sync_engine_type engine_type, u32 engine_id, char **buf,
5371 	size_t *size, size_t *offset)
5372 {
5373 	/* Not implemented */
5374 	return 0;
5375 }
5376 
5377 
5378 static struct hl_state_dump_specs_funcs goya_state_dump_funcs = {
5379 	.monitor_valid = goya_monitor_valid,
5380 	.print_single_monitor = goya_print_single_monitor,
5381 	.gen_sync_to_engine_map = goya_gen_sync_to_engine_map,
5382 	.print_fences_single_engine = goya_print_fences_single_engine,
5383 };
5384 
5385 static void goya_state_dump_init(struct hl_device *hdev)
5386 {
5387 	/* Not implemented */
5388 	hdev->state_dump_specs.props = goya_state_dump_specs_props;
5389 	hdev->state_dump_specs.funcs = goya_state_dump_funcs;
5390 }
5391 
5392 static u32 goya_get_sob_addr(struct hl_device *hdev, u32 sob_id)
5393 {
5394 	return 0;
5395 }
5396 
5397 static u32 *goya_get_stream_master_qid_arr(void)
5398 {
5399 	return NULL;
5400 }
5401 
5402 static int goya_get_monitor_dump(struct hl_device *hdev, void *data)
5403 {
5404 	return -EOPNOTSUPP;
5405 }
5406 
5407 static void goya_check_if_razwi_happened(struct hl_device *hdev)
5408 {
5409 }
5410 
5411 static int goya_scrub_device_dram(struct hl_device *hdev, u64 val)
5412 {
5413 	return -EOPNOTSUPP;
5414 }
5415 
5416 static int goya_set_dram_properties(struct hl_device *hdev)
5417 {
5418 	return 0;
5419 }
5420 
5421 static int goya_set_binning_masks(struct hl_device *hdev)
5422 {
5423 	return 0;
5424 }
5425 
5426 static int goya_send_device_activity(struct hl_device *hdev, bool open)
5427 {
5428 	return 0;
5429 }
5430 
5431 static const struct hl_asic_funcs goya_funcs = {
5432 	.early_init = goya_early_init,
5433 	.early_fini = goya_early_fini,
5434 	.late_init = goya_late_init,
5435 	.late_fini = goya_late_fini,
5436 	.sw_init = goya_sw_init,
5437 	.sw_fini = goya_sw_fini,
5438 	.hw_init = goya_hw_init,
5439 	.hw_fini = goya_hw_fini,
5440 	.halt_engines = goya_halt_engines,
5441 	.suspend = goya_suspend,
5442 	.resume = goya_resume,
5443 	.mmap = goya_mmap,
5444 	.ring_doorbell = goya_ring_doorbell,
5445 	.pqe_write = goya_pqe_write,
5446 	.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
5447 	.asic_dma_free_coherent = goya_dma_free_coherent,
5448 	.scrub_device_mem = goya_scrub_device_mem,
5449 	.scrub_device_dram = goya_scrub_device_dram,
5450 	.get_int_queue_base = goya_get_int_queue_base,
5451 	.test_queues = goya_test_queues,
5452 	.asic_dma_pool_zalloc = goya_dma_pool_zalloc,
5453 	.asic_dma_pool_free = goya_dma_pool_free,
5454 	.cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
5455 	.cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
5456 	.dma_unmap_sgtable = hl_asic_dma_unmap_sgtable,
5457 	.cs_parser = goya_cs_parser,
5458 	.dma_map_sgtable = hl_asic_dma_map_sgtable,
5459 	.add_end_of_cb_packets = goya_add_end_of_cb_packets,
5460 	.update_eq_ci = goya_update_eq_ci,
5461 	.context_switch = goya_context_switch,
5462 	.restore_phase_topology = goya_restore_phase_topology,
5463 	.debugfs_read_dma = goya_debugfs_read_dma,
5464 	.add_device_attr = goya_add_device_attr,
5465 	.handle_eqe = goya_handle_eqe,
5466 	.get_events_stat = goya_get_events_stat,
5467 	.read_pte = goya_read_pte,
5468 	.write_pte = goya_write_pte,
5469 	.mmu_invalidate_cache = goya_mmu_invalidate_cache,
5470 	.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
5471 	.mmu_prefetch_cache_range = NULL,
5472 	.send_heartbeat = goya_send_heartbeat,
5473 	.debug_coresight = goya_debug_coresight,
5474 	.is_device_idle = goya_is_device_idle,
5475 	.compute_reset_late_init = goya_compute_reset_late_init,
5476 	.hw_queues_lock = goya_hw_queues_lock,
5477 	.hw_queues_unlock = goya_hw_queues_unlock,
5478 	.get_pci_id = goya_get_pci_id,
5479 	.get_eeprom_data = goya_get_eeprom_data,
5480 	.get_monitor_dump = goya_get_monitor_dump,
5481 	.send_cpu_message = goya_send_cpu_message,
5482 	.pci_bars_map = goya_pci_bars_map,
5483 	.init_iatu = goya_init_iatu,
5484 	.rreg = hl_rreg,
5485 	.wreg = hl_wreg,
5486 	.halt_coresight = goya_halt_coresight,
5487 	.ctx_init = goya_ctx_init,
5488 	.ctx_fini = goya_ctx_fini,
5489 	.pre_schedule_cs = goya_pre_schedule_cs,
5490 	.get_queue_id_for_cq = goya_get_queue_id_for_cq,
5491 	.load_firmware_to_device = goya_load_firmware_to_device,
5492 	.load_boot_fit_to_device = goya_load_boot_fit_to_device,
5493 	.get_signal_cb_size = goya_get_signal_cb_size,
5494 	.get_wait_cb_size = goya_get_wait_cb_size,
5495 	.gen_signal_cb = goya_gen_signal_cb,
5496 	.gen_wait_cb = goya_gen_wait_cb,
5497 	.reset_sob = goya_reset_sob,
5498 	.reset_sob_group = goya_reset_sob_group,
5499 	.get_device_time = goya_get_device_time,
5500 	.pb_print_security_errors = NULL,
5501 	.collective_wait_init_cs = goya_collective_wait_init_cs,
5502 	.collective_wait_create_jobs = goya_collective_wait_create_jobs,
5503 	.get_dec_base_addr = NULL,
5504 	.scramble_addr = hl_mmu_scramble_addr,
5505 	.descramble_addr = hl_mmu_descramble_addr,
5506 	.ack_protection_bits_errors = goya_ack_protection_bits_errors,
5507 	.get_hw_block_id = goya_get_hw_block_id,
5508 	.hw_block_mmap = goya_block_mmap,
5509 	.enable_events_from_fw = goya_enable_events_from_fw,
5510 	.ack_mmu_errors = goya_ack_mmu_page_fault_or_access_error,
5511 	.map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx,
5512 	.init_firmware_preload_params = goya_init_firmware_preload_params,
5513 	.init_firmware_loader = goya_init_firmware_loader,
5514 	.init_cpu_scrambler_dram = goya_cpu_init_scrambler_dram,
5515 	.state_dump_init = goya_state_dump_init,
5516 	.get_sob_addr = goya_get_sob_addr,
5517 	.set_pci_memory_regions = goya_set_pci_memory_regions,
5518 	.get_stream_master_qid_arr = goya_get_stream_master_qid_arr,
5519 	.check_if_razwi_happened = goya_check_if_razwi_happened,
5520 	.mmu_get_real_page_size = hl_mmu_get_real_page_size,
5521 	.access_dev_mem = hl_access_dev_mem,
5522 	.set_dram_bar_base = goya_set_ddr_bar_base,
5523 	.send_device_activity = goya_send_device_activity,
5524 	.set_dram_properties = goya_set_dram_properties,
5525 	.set_binning_masks = goya_set_binning_masks,
5526 };
5527 
5528 /*
5529  * goya_set_asic_funcs - set Goya function pointers
5530  *
5531  * @hdev: pointer to hl_device structure
5532  *
5533  */
5534 void goya_set_asic_funcs(struct hl_device *hdev)
5535 {
5536 	hdev->asic_funcs = &goya_funcs;
5537 }
5538