// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_fw.h"
#include "pvr_fw_meta.h"
#include "pvr_fw_startstop.h"
#include "pvr_rogue_cr_defs.h"
#include "pvr_rogue_meta.h"
#include "pvr_vm.h"

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/types.h>

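/* Timeout, in microseconds, for the register polls in pvr_fw_stop() (1 second). */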
#define POLL_TIMEOUT_USEC 1000000

static void
rogue_axi_ace_list_init(struct pvr_device *pvr_dev)
{
	/* Setup AXI-ACE config. Set everything to outer cache. */
	u64 reg_val =
		(3U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) |
		(3U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) |
		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT) |
		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) |
		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) |
		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) |
		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) |
		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT);

	pvr_cr_write64(pvr_dev, ROGUE_CR_AXI_ACE_LITE_CONFIGURATION, reg_val);
}

static void
rogue_bif_init(struct pvr_device *pvr_dev)
{
	dma_addr_t pc_dma_addr;
	u64 pc_addr;

	/* Acquire the address of the Kernel Page Catalogue. */
	pc_dma_addr = pvr_vm_get_page_table_root_addr(pvr_dev->kernel_vm_ctx);

	/* Write the kernel catalogue base. */
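	/*
	 * The page catalogue base is aligned, so drop the low alignment bits,
	 * shift the result into the register's ADDR field and mask it to the
	 * width of that field.
	 */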
	pc_addr = ((((u64)pc_dma_addr >> ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
		    << ROGUE_CR_BIF_CAT_BASE0_ADDR_SHIFT) &
		   ~ROGUE_CR_BIF_CAT_BASE0_ADDR_CLRMSK);

	pvr_cr_write64(pvr_dev, BIF_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV),
		       pc_addr);
}

static int
rogue_slc_init(struct pvr_device *pvr_dev)
{
	u16 slc_cache_line_size_bits;
	u32 reg_val;
	int err;

	/*
	 * SLC Misc control.
	 *
	 * Note: This is a 64bit register and we set only the lower 32bits
	 *       leaving the top 32bits (ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS)
	 *       unchanged from the HW default.
	 */
	reg_val = (pvr_cr_read32(pvr_dev, ROGUE_CR_SLC_CTRL_MISC) &
		   ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN) |
		  ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;

	err = PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_bits, &slc_cache_line_size_bits);
	if (err)
		return err;

	/* Bypass burst combiner if SLC line size is smaller than 1024 bits. */
	if (slc_cache_line_size_bits < 1024)
		reg_val |= ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN;

	if (PVR_HAS_QUIRK(pvr_dev, 71242) && !PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support))
		reg_val |= ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN;

	pvr_cr_write32(pvr_dev, ROGUE_CR_SLC_CTRL_MISC, reg_val);

	return 0;
}

/**
 * pvr_fw_start() - Start FW processor and boot firmware
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by rogue_slc_init().
 */
int
pvr_fw_start(struct pvr_device *pvr_dev)
{
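	/*
	 * On cores with the xe_tpu2 feature, the second soft-reset register
	 * (ROGUE_CR_SOFT_RESET2) is driven alongside ROGUE_CR_SOFT_RESET
	 * throughout the sequence below.
	 */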
	bool has_reset2 = PVR_HAS_FEATURE(pvr_dev, xe_tpu2);
	u64 soft_reset_mask;
	int err;

	if (PVR_HAS_FEATURE(pvr_dev, pbe2_in_xe))
		soft_reset_mask = ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL;
	else
		soft_reset_mask = ROGUE_CR_SOFT_RESET_MASKFULL;

	if (PVR_HAS_FEATURE(pvr_dev, sys_bus_secure_reset)) {
		/*
		 * Disable the default sys_bus_secure protection to perform
		 * minimal setup.
		 */
		pvr_cr_write32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE, 0);
		(void)pvr_cr_read32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE); /* Fence write */
	}

	/* Set Rogue in soft-reset. */
	pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask);
	if (has_reset2)
		pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, ROGUE_CR_SOFT_RESET2_MASKFULL);

	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline. */
	(void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);
	if (has_reset2)
		(void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2);

	/* Take Rascal and Dust out of reset. */
	pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET,
		       soft_reset_mask ^ ROGUE_CR_SOFT_RESET_RASCALDUSTS_EN);
	if (has_reset2)
		pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, 0);

	(void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);
	if (has_reset2)
		(void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2);

	/* Take everything out of reset but the FW processor. */
	pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, ROGUE_CR_SOFT_RESET_GARTEN_EN);
	if (has_reset2)
		pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, 0);

	(void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);
	if (has_reset2)
		(void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2);

	err = rogue_slc_init(pvr_dev);
	if (err)
		goto err_reset;

	/* Initialise Firmware wrapper. */
	pvr_dev->fw_dev.defs->wrapper_init(pvr_dev);

	/* We must init the AXI-ACE interface before first BIF transaction. */
	rogue_axi_ace_list_init(pvr_dev);

	if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
		/* Initialise BIF. */
		rogue_bif_init(pvr_dev);
	}

	/* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */
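	/* udelay(3) gives at least 16 cycles at any clock above ~5.4 MHz. */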
	udelay(3);

	pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, 0x0);
	(void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);

	/* ... and afterwards. */
	udelay(3);

	return 0;

err_reset:
	/* Put everything back into soft-reset. */
	pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask);

	return err;
}

/**
 * pvr_fw_stop() - Stop FW processor
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_cr_poll_reg32(), pvr_cr_poll_reg64() or
 *    pvr_meta_cr_read32().
 */
int
pvr_fw_stop(struct pvr_device *pvr_dev)
{
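	/*
	 * Sidekick/Jones idle mask with the Garten wrapper, SOCIF and host
	 * interface bits excluded; the Garten wrapper is polled separately
	 * further down.
	 */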
	const u32 sidekick_idle_mask = ROGUE_CR_SIDEKICK_IDLE_MASKFULL &
				       ~(ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN |
					 ROGUE_CR_SIDEKICK_IDLE_SOCIF_EN |
					 ROGUE_CR_SIDEKICK_IDLE_HOSTIF_EN);
	bool skip_garten_idle = false;
	u32 reg_value;
	int err;

	/*
	 * Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper.
	 * For cores with the LAYOUT_MARS feature, SIDEKICK would have been
	 * powered down by the FW.
	 */
	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE, sidekick_idle_mask,
				sidekick_idle_mask, POLL_TIMEOUT_USEC);
	if (err)
		return err;

	/* Unset MTS DM association with threads. */
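	/*
	 * MASKFULL & DM_ASSOC_CLRMSK yields all valid register bits set with
	 * the DM association field cleared.
	 */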
	pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC,
		       ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL &
		       ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK);
	pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC,
		       ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL &
		       ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK);
	pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC,
		       ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL &
		       ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK);
	pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC,
		       ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL &
		       ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK);

	/* Extra Idle checks. */
	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIF_STATUS_MMU, 0,
				ROGUE_CR_BIF_STATUS_MMU_MASKFULL,
				POLL_TIMEOUT_USEC);
	if (err)
		return err;

	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIFPM_STATUS_MMU, 0,
				ROGUE_CR_BIFPM_STATUS_MMU_MASKFULL,
				POLL_TIMEOUT_USEC);
	if (err)
		return err;

	if (!PVR_HAS_FEATURE(pvr_dev, xt_top_infrastructure)) {
		err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIF_READS_EXT_STATUS, 0,
					ROGUE_CR_BIF_READS_EXT_STATUS_MASKFULL,
					POLL_TIMEOUT_USEC);
		if (err)
			return err;
	}

	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIFPM_READS_EXT_STATUS, 0,
				ROGUE_CR_BIFPM_READS_EXT_STATUS_MASKFULL,
				POLL_TIMEOUT_USEC);
	if (err)
		return err;

	err = pvr_cr_poll_reg64(pvr_dev, ROGUE_CR_SLC_STATUS1, 0,
				ROGUE_CR_SLC_STATUS1_MASKFULL,
				POLL_TIMEOUT_USEC);
	if (err)
		return err;

	/*
	 * Wait for SLC to signal IDLE.
	 * For cores with the LAYOUT_MARS feature, SLC would have been powered
	 * down by the FW.
	 */
	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SLC_IDLE,
				ROGUE_CR_SLC_IDLE_MASKFULL,
				ROGUE_CR_SLC_IDLE_MASKFULL, POLL_TIMEOUT_USEC);
	if (err)
		return err;

	/*
	 * Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper.
	 * For cores with the LAYOUT_MARS feature, SIDEKICK would have been powered
	 * down by the FW.
	 */
	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE, sidekick_idle_mask,
				sidekick_idle_mask, POLL_TIMEOUT_USEC);
	if (err)
		return err;

	if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_META) {
		err = pvr_meta_cr_read32(pvr_dev, META_CR_TxVECINT_BHALT, &reg_value);
		if (err)
			return err;

		/*
		 * Wait for Sidekick/Jones to signal IDLE including the Garten
		 * Wrapper if there is no debugger attached (TxVECINT_BHALT =
		 * 0x0).
		 */
		if (reg_value)
			skip_garten_idle = true;
	}

	if (!skip_garten_idle) {
		err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE,
					ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN,
					ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN,
					POLL_TIMEOUT_USEC);
		if (err)
			return err;
	}

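	/* The GPU is now idle; put everything back into soft-reset. */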
	if (PVR_HAS_FEATURE(pvr_dev, pbe2_in_xe))
		pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET,
			       ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL);
	else
		pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, ROGUE_CR_SOFT_RESET_MASKFULL);

	return 0;
}