// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_fw.h"
#include "pvr_fw_info.h"
#include "pvr_fw_meta.h"
#include "pvr_gem.h"
#include "pvr_rogue_cr_defs.h"
#include "pvr_rogue_meta.h"
#include "pvr_vm.h"

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/ktime.h>
#include <linux/types.h>

#define ROGUE_FW_HEAP_META_SHIFT 25 /* 32 MB */

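/* Timeout, in microseconds, for the Slave Port readiness polls below. */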
#define POLL_TIMEOUT_USEC 1000000

/**
 * pvr_meta_cr_read32() - Read a META register via the Slave Port
 * @pvr_dev: Device pointer.
 * @reg_addr: Address of register to read.
 * @reg_value_out: Pointer to location to store register value.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_cr_poll_reg32().
 */
int
pvr_meta_cr_read32(struct pvr_device *pvr_dev, u32 reg_addr, u32 *reg_value_out)
{
	int err;

	/* Wait for Slave Port to be Ready. */
	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1,
				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
					ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
					ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
				POLL_TIMEOUT_USEC);
	if (err)
		return err;

	/* Issue a Read. */
	pvr_cr_write32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL0,
		       reg_addr | ROGUE_CR_META_SP_MSLVCTRL0_RD_EN);
	(void)pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL0); /* Fence write. */

	/* Wait for Slave Port to be Ready. */
	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1,
				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
					ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
					ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
				POLL_TIMEOUT_USEC);
	if (err)
		return err;

	*reg_value_out = pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVDATAX);

	return 0;
}

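/*
 * pvr_meta_wrapper_init() - Configure the META wrapper ahead of firmware boot
 *
 * Puts META into master boot mode and programs the Garten wrapper so that its
 * idle signal is controlled by META and BIF fences are issued through the
 * privileged firmware MMU context. The fence address itself is set later,
 * during the firmware init sequence.
 */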
static int
pvr_meta_wrapper_init(struct pvr_device *pvr_dev)
{
	u64 garten_config;

	/* Configure META to Master boot. */
	pvr_cr_write64(pvr_dev, ROGUE_CR_META_BOOT, ROGUE_CR_META_BOOT_MODE_EN);

	/* Set Garten IDLE to META idle and Set the Garten Wrapper BIF Fence address. */

	/* Garten IDLE bit controlled by META. */
	garten_config = ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;

	/* The fence addr is set during the fw init sequence. */

	/* Set PC = 0 for fences. */
	garten_config &=
		ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK;
	garten_config |=
		(u64)MMU_CONTEXT_MAPPING_FWPRIV
		<< ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT;

	/* Set SLC DM=META. */
	garten_config |= ((u64)ROGUE_FW_SEGMMU_META_BIFDM_ID)
			 << ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT;

	pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG, garten_config);

	return 0;
}

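/*
 * add_boot_arg() - Append a (parameter, value) pair to the boot config stream
 *
 * Writes @param and @data at the current position and advances *@boot_conf
 * past the pair.
 */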
static __always_inline void
add_boot_arg(u32 **boot_conf, u32 param, u32 data)
{
	*(*boot_conf)++ = param;
	*(*boot_conf)++ = data;
}

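/*
 * meta_ldr_cmd_loadmem() - Handle an LDR LOADMEM command
 *
 * Copies the payload of the referenced L2 data block into the firmware
 * segment containing the target META address, after bounds-checking both the
 * block header and its data against the firmware image size.
 */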
static int
meta_ldr_cmd_loadmem(struct drm_device *drm_dev, const u8 *fw,
		     struct rogue_meta_ldr_l1_data_blk *l1_data, u32 coremem_size, u8 *fw_code_ptr,
		     u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr, const u32 fw_size)
{
	struct rogue_meta_ldr_l2_data_blk *l2_block =
		(struct rogue_meta_ldr_l2_data_blk *)(fw +
						      l1_data->cmd_data[1]);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	u32 offset = l1_data->cmd_data[0];
	u32 data_size;
	void *write_addr;
	int err;

	/* Verify header is within bounds. */
	if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size)
		return -EINVAL;

	data_size = l2_block->length - 6 /* L2 Tag length and checksum */;

	/* Verify data is within bounds. */
	if (((u8 *)l2_block->block_data - fw) >= fw_size ||
	    ((((u8 *)l2_block->block_data) + data_size) - fw) >= fw_size)
		return -EINVAL;

	if (!ROGUE_META_IS_COREMEM_CODE(offset, coremem_size) &&
	    !ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) {
		/* Global range is aliased to local range */
		offset &= ~META_MEM_GLOBAL_RANGE_BIT;
	}

	err = pvr_fw_find_mmu_segment(pvr_dev, offset, data_size, fw_code_ptr, fw_data_ptr,
				      fw_core_code_ptr, fw_core_data_ptr, &write_addr);
	if (err) {
		drm_err(drm_dev,
			"Addr 0x%x (size: %d) not found in any firmware segment",
			offset, data_size);
		return err;
	}

	memcpy(write_addr, l2_block->block_data, data_size);

	return 0;
}

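/*
 * meta_ldr_cmd_zeromem() - Handle an LDR ZEROMEM command
 *
 * Zeroes the requested byte range within the firmware segment containing the
 * target META address. Coremem data cannot be zeroed directly, so such blocks
 * are silently skipped.
 */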
static int
meta_ldr_cmd_zeromem(struct drm_device *drm_dev,
		     struct rogue_meta_ldr_l1_data_blk *l1_data, u32 coremem_size,
		     u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	u32 offset = l1_data->cmd_data[0];
	u32 byte_count = l1_data->cmd_data[1];
	void *write_addr;
	int err;

	if (ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) {
		/* cannot zero coremem directly */
		return 0;
	}

	/* Global range is aliased to local range */
	offset &= ~META_MEM_GLOBAL_RANGE_BIT;

	err = pvr_fw_find_mmu_segment(pvr_dev, offset, byte_count, fw_code_ptr, fw_data_ptr,
				      fw_core_code_ptr, fw_core_data_ptr, &write_addr);
	if (err) {
		drm_err(drm_dev,
			"Addr 0x%x (size: %d) not found in any firmware segment",
			offset, byte_count);
		return err;
	}

	memset(write_addr, 0, byte_count);

	return 0;
}

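/*
 * meta_ldr_cmd_config() - Handle an LDR CONFIG command
 *
 * Walks the referenced L2 config block in 12-byte entries. Each entry must be
 * a register write (ROGUE_META_LDR_CFG_WRITE); the register offset/value pair
 * is appended to the bootloader config stream when one is provided.
 */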
static int
meta_ldr_cmd_config(struct drm_device *drm_dev, const u8 *fw,
		    struct rogue_meta_ldr_l1_data_blk *l1_data,
		    const u32 fw_size, u32 **boot_conf_ptr)
{
	struct rogue_meta_ldr_l2_data_blk *l2_block =
		(struct rogue_meta_ldr_l2_data_blk *)(fw +
						      l1_data->cmd_data[0]);
	struct rogue_meta_ldr_cfg_blk *config_command;
	u32 l2_block_size;
	u32 curr_block_size = 0;
	u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL;

	/* Verify block header is within bounds. */
	if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size)
		return -EINVAL;

	l2_block_size = l2_block->length - 6 /* L2 Tag length and checksum */;
	config_command = (struct rogue_meta_ldr_cfg_blk *)l2_block->block_data;

	if (((u8 *)config_command - fw) >= fw_size ||
	    ((((u8 *)config_command) + l2_block_size) - fw) >= fw_size)
		return -EINVAL;

	while (l2_block_size >= 12) {
		if (config_command->type != ROGUE_META_LDR_CFG_WRITE)
			return -EINVAL;

		/*
		 * Only write to bootloader if we got a valid pointer to the FW
		 * code allocation.
		 */
		if (boot_conf) {
			u32 register_offset = config_command->block_data[0];
			u32 register_value = config_command->block_data[1];

			/* Do register write */
			add_boot_arg(&boot_conf, register_offset,
				     register_value);
		}

		curr_block_size = 12;
		l2_block_size -= curr_block_size;
		config_command = (struct rogue_meta_ldr_cfg_blk
					  *)((uintptr_t)config_command +
					     curr_block_size);
	}

	if (boot_conf_ptr)
		*boot_conf_ptr = boot_conf;

	return 0;
}

/**
 * process_ldr_command_stream() - Process LDR firmware image and populate
 *                                firmware sections
 * @pvr_dev: Device pointer.
 * @fw: Pointer to firmware image.
 * @fw_code_ptr: Pointer to FW code section.
 * @fw_data_ptr: Pointer to FW data section.
 * @fw_core_code_ptr: Pointer to FW coremem code section.
 * @fw_core_data_ptr: Pointer to FW coremem data section.
 * @boot_conf_ptr: Pointer to boot config argument pointer.
 *
 * Returns:
 *  * 0 on success, or
 *  * -EINVAL on any error in LDR command stream.
 */
static int
process_ldr_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
			   u8 *fw_data_ptr, u8 *fw_core_code_ptr,
			   u8 *fw_core_data_ptr, u32 **boot_conf_ptr)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct rogue_meta_ldr_block_hdr *ldr_header =
		(struct rogue_meta_ldr_block_hdr *)fw;
	struct rogue_meta_ldr_l1_data_blk *l1_data =
		(struct rogue_meta_ldr_l1_data_blk *)(fw + ldr_header->sl_data);
	const u32 fw_size = pvr_dev->fw_dev.firmware->size;
	int err;

	u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL;
	u32 coremem_size;

	err = PVR_FEATURE_VALUE(pvr_dev, meta_coremem_size, &coremem_size);
	if (err)
		return err;

	coremem_size *= SZ_1K;

	while (l1_data) {
		/* Verify block header is within bounds. */
		if (((u8 *)l1_data - fw) >= fw_size || ((u8 *)(l1_data + 1) - fw) >= fw_size)
			return -EINVAL;

		if (ROGUE_META_LDR_BLK_IS_COMMENT(l1_data->cmd)) {
			/* Don't process comment blocks */
			goto next_block;
		}

		switch (l1_data->cmd & ROGUE_META_LDR_CMD_MASK) {
		case ROGUE_META_LDR_CMD_LOADMEM:
			err = meta_ldr_cmd_loadmem(drm_dev, fw, l1_data,
						   coremem_size,
						   fw_code_ptr, fw_data_ptr,
						   fw_core_code_ptr,
						   fw_core_data_ptr, fw_size);
			if (err)
				return err;
			break;

		case ROGUE_META_LDR_CMD_START_THREADS:
			/* Don't process this block */
			break;

		case ROGUE_META_LDR_CMD_ZEROMEM:
			err = meta_ldr_cmd_zeromem(drm_dev, l1_data,
						   coremem_size,
						   fw_code_ptr, fw_data_ptr,
						   fw_core_code_ptr,
						   fw_core_data_ptr);
			if (err)
				return err;
			break;

		case ROGUE_META_LDR_CMD_CONFIG:
			err = meta_ldr_cmd_config(drm_dev, fw, l1_data, fw_size,
						  &boot_conf);
			if (err)
				return err;
			break;

		default:
			return -EINVAL;
		}

next_block:
		if (l1_data->next == 0xFFFFFFFF)
			break;

		l1_data = (struct rogue_meta_ldr_l1_data_blk *)(fw +
								l1_data->next);
	}

	if (boot_conf_ptr)
		*boot_conf_ptr = boot_conf;

	return 0;
}

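/*
 * configure_seg_id() - Emit boot config writes for one META segment MMU entry
 *
 * Programs the segment base, the limit (expressed as size - 1, with a minimum
 * size of ROGUE_FW_SEGMMU_ALIGN) and the 64-bit output address, split across
 * the OUTA0/OUTA1 registers, for the segment identified by @seg_id.
 */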
static void
configure_seg_id(u64 seg_out_addr, u32 seg_base, u32 seg_limit, u32 seg_id,
		 u32 **boot_conf_ptr)
{
	u32 seg_out_addr0 = seg_out_addr & 0x00000000FFFFFFFFUL;
	u32 seg_out_addr1 = (seg_out_addr >> 32) & 0x00000000FFFFFFFFUL;
	u32 *boot_conf = *boot_conf_ptr;

	/* META segments have a minimum size. */
	u32 limit_off = max(seg_limit, ROGUE_FW_SEGMMU_ALIGN);

	/* The limit is an offset, therefore off = size - 1. */
	limit_off -= 1;

	seg_base |= ROGUE_FW_SEGMMU_ALLTHRS_WRITEABLE;

	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_BASE(seg_id), seg_base);
	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_LIMIT(seg_id), limit_off);
	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA0(seg_id), seg_out_addr0);
	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA1(seg_id), seg_out_addr1);

	*boot_conf_ptr = boot_conf;
}

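/* Return the GPU address of a firmware object within the firmware heap. */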
static u64 get_fw_obj_gpu_addr(struct pvr_fw_object *fw_obj)
{
	struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	return fw_obj->fw_addr_offset + fw_dev->fw_heap_info.gpu_addr;
}

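/*
 * configure_seg_mmu() - Emit boot config writes for the FW data segment
 *
 * Only the FW data segment needs a segment MMU entry here: FW code uses the
 * bootloader segment already configured at boot, and FW coremem code/data do
 * not go through the segment MMU.
 */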
static void
configure_seg_mmu(struct pvr_device *pvr_dev, u32 **boot_conf_ptr)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
	u64 seg_out_addr_top;
	u32 i;

	seg_out_addr_top =
		ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV,
						ROGUE_FW_SEGMMU_META_BIFDM_ID);

	for (i = 0; i < num_layout_entries; i++) {
		/*
		 * FW code is using the bootloader segment which is already
		 * configured on boot. FW coremem code and data don't use the
		 * segment MMU. Only the FW data segment needs to be configured.
		 */
		if (layout_entries[i].type == FW_DATA) {
			u32 seg_id = ROGUE_FW_SEGMMU_DATA_ID;
			u64 seg_out_addr = get_fw_obj_gpu_addr(pvr_dev->fw_dev.mem.data_obj);

			seg_out_addr += layout_entries[i].alloc_offset;
			seg_out_addr |= seg_out_addr_top;

			/* Write the sequence to the bootldr. */
			configure_seg_id(seg_out_addr,
					 layout_entries[i].base_addr,
					 layout_entries[i].alloc_size, seg_id,
					 boot_conf_ptr);

			break;
		}
	}
}

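/*
 * configure_meta_caches() - Emit boot config writes for META cache setup
 *
 * Sets up data and instruction cache partitioning for all four hardware
 * threads (thread 0 gets the full local cache partition), enables WIN-3
 * enhanced bypass for the local region MMU, and enables cache hits for both
 * caches.
 */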
static void
configure_meta_caches(u32 **boot_conf_ptr)
{
	u32 *boot_conf = *boot_conf_ptr;
	u32 d_cache_t0, i_cache_t0;
	u32 d_cache_t1, i_cache_t1;
	u32 d_cache_t2, i_cache_t2;
	u32 d_cache_t3, i_cache_t3;

	/* Initialise I/Dcache settings */
	d_cache_t0 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
	d_cache_t1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
	d_cache_t2 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
	d_cache_t3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
	i_cache_t0 = 0;
	i_cache_t1 = 0;
	i_cache_t2 = 0;
	i_cache_t3 = 0;

	d_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
	i_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;

	/* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */
	add_boot_arg(&boot_conf, META_CR_MMCU_LOCAL_EBCTRL,
		     META_CR_MMCU_LOCAL_EBCTRL_ICWIN |
			     META_CR_MMCU_LOCAL_EBCTRL_DCWIN);

	/* Data cache partitioning thread 0 to 3 */
	add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(0), d_cache_t0);
	add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(1), d_cache_t1);
	add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(2), d_cache_t2);
	add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(3), d_cache_t3);

	/* Enable data cache hits */
	add_boot_arg(&boot_conf, META_CR_MMCU_DCACHE_CTRL,
		     META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);

	/* Instruction cache partitioning thread 0 to 3 */
	add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(0), i_cache_t0);
	add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(1), i_cache_t1);
	add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(2), i_cache_t2);
	add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(3), i_cache_t3);

	/* Enable instruction cache hits */
	add_boot_arg(&boot_conf, META_CR_MMCU_ICACHE_CTRL,
		     META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);

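	/*
	 * Raw register address; no symbolic define is provided for it in this
	 * driver.
	 */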
	add_boot_arg(&boot_conf, 0x040000C0, 0);

	*boot_conf_ptr = boot_conf;
}

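/*
 * pvr_meta_fw_process() - Process the META LDR firmware image
 *
 * Populates the firmware sections from the LDR command stream and builds the
 * bootloader config stream at ROGUE_FW_BOOTLDR_CONF_OFFSET within the FW code
 * allocation: privileged Slave Port/JTAG access, segment MMU setup, LDR
 * CONFIG register writes, cache configuration, then the trailing coremem code
 * and META DMA arguments.
 */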
static int
pvr_meta_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
		    u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
		    u32 core_code_alloc_size)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	u32 *boot_conf;
	int err;

	boot_conf = ((u32 *)fw_code_ptr) + ROGUE_FW_BOOTLDR_CONF_OFFSET;

	/* Slave port and JTAG accesses are privileged. */
	add_boot_arg(&boot_conf, META_CR_SYSC_JTAG_THREAD,
		     META_CR_SYSC_JTAG_THREAD_PRIV_EN);

	configure_seg_mmu(pvr_dev, &boot_conf);

	/* Populate FW sections from LDR image. */
	err = process_ldr_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
					 fw_core_data_ptr, &boot_conf);
	if (err)
		return err;

	configure_meta_caches(&boot_conf);

	/* End argument list. */
	add_boot_arg(&boot_conf, 0, 0);

	if (fw_dev->mem.core_code_obj) {
		u32 core_code_fw_addr;

		pvr_fw_object_get_fw_addr(fw_dev->mem.core_code_obj, &core_code_fw_addr);
		add_boot_arg(&boot_conf, core_code_fw_addr, core_code_alloc_size);
	} else {
		add_boot_arg(&boot_conf, 0, 0);
	}
	/* None of the cores supported by this driver have META DMA. */
	add_boot_arg(&boot_conf, 0, 0);

	return 0;
}

static int
pvr_meta_init(struct pvr_device *pvr_dev)
{
	pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_META_SHIFT, 0);

	return 0;
}

static u32
pvr_meta_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
{
	u32 fw_addr = fw_obj->fw_addr_offset + offset + ROGUE_FW_SEGMMU_DATA_BASE_ADDRESS;

	/* META cacheability is determined by address. */
	if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
		fw_addr |= ROGUE_FW_SEGMMU_DATA_META_UNCACHED |
			   ROGUE_FW_SEGMMU_DATA_VIVT_SLC_UNCACHED;

	return fw_addr;
}

static int
pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;

	return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start,
			  pvr_gem_object_size(pvr_obj));
}

static void
pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
{
	pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
		     fw_obj->fw_mm_node.size);
}

static bool
pvr_meta_has_fixed_data_addr(void)
{
	return false;
}

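/* Firmware processor callbacks and IRQ registers for META-based Rogue GPUs. */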
const struct pvr_fw_defs pvr_fw_defs_meta = {
	.init = pvr_meta_init,
	.fw_process = pvr_meta_fw_process,
	.vm_map = pvr_meta_vm_map,
	.vm_unmap = pvr_meta_vm_unmap,
	.get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset,
	.wrapper_init = pvr_meta_wrapper_init,
	.has_fixed_data_addr = pvr_meta_has_fixed_data_addr,
	.irq = {
		.enable_reg = ROGUE_CR_META_SP_MSLVIRQENABLE,
		.status_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
		.clear_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
		.event_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN,
		.clear_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK,
	},
};