/* xref: /linux/drivers/gpu/drm/imagination/pvr_fw_riscv.c (revision b08494a8f7416e5f09907318c5460ad6f6e2a548) */
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /* Copyright (c) 2024 Imagination Technologies Ltd. */
3 
4 #include "pvr_device.h"
5 #include "pvr_fw.h"
6 #include "pvr_fw_info.h"
7 #include "pvr_fw_mips.h"
8 #include "pvr_gem.h"
9 #include "pvr_rogue_cr_defs.h"
10 #include "pvr_rogue_riscv.h"
11 #include "pvr_vm.h"
12 
13 #include <linux/compiler.h>
14 #include <linux/delay.h>
15 #include <linux/firmware.h>
16 #include <linux/ktime.h>
17 #include <linux/types.h>
18 
19 #define ROGUE_FW_HEAP_RISCV_SHIFT 25 /* 32 MB */
20 #define ROGUE_FW_HEAP_RISCV_SIZE (1u << ROGUE_FW_HEAP_RISCV_SHIFT)
21 
static int
pvr_riscv_wrapper_init(struct pvr_device *pvr_dev)
{
	/*
	 * Remap options shared by both bootloader windows: the window size
	 * (expressed in ALIGNSHIFT units) and the privileged FW MMU context
	 * used to translate the remapped accesses.
	 */
	const u64 common_opts =
		((u64)(ROGUE_FW_HEAP_RISCV_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT)
		 << ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT) |
		((u64)MMU_CONTEXT_MAPPING_FWPRIV
		 << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT);

	/* GPU virtual addresses of the firmware code and data allocations. */
	u64 code_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.code_obj);
	u64 data_addr = pvr_fw_obj_get_gpu_addr(pvr_dev->fw_dev.mem.data_obj);

	/* This condition allows us to OR the addresses into the register directly. */
	static_assert(ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT ==
		      ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT);

	/* Both addresses must be aligned to the remap granularity to OR cleanly. */
	WARN_ON(code_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);
	WARN_ON(data_addr & ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK);

	/* Code window: instruction fetches enabled. */
	pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_CODE),
		       code_addr | common_opts | ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN);

	/* Data window: load/store accesses enabled. */
	pvr_cr_write64(pvr_dev, ROGUE_RISCVFW_REGION_REMAP_CR(BOOTLDR_DATA),
		       data_addr | common_opts |
			       ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN);

	/* Garten IDLE bit controlled by RISC-V. */
	pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG,
		       ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);

	return 0;
}
54 
/*
 * Boot parameters written at the start of the firmware data section and
 * consumed by the RISC-V firmware at startup. The layout is shared with the
 * firmware, so field order and sizes must not change.
 */
struct rogue_riscv_fw_boot_data {
	/* GPU virtual address of the coremem code allocation. */
	u64 coremem_code_dev_vaddr;
	/* GPU virtual address of the coremem data allocation. */
	u64 coremem_data_dev_vaddr;
	/* Firmware-view address of the coremem code allocation. */
	u32 coremem_code_fw_addr;
	/* Firmware-view address of the coremem data allocation. */
	u32 coremem_data_fw_addr;
	/* Size in bytes of the coremem code allocation. */
	u32 coremem_code_size;
	/* Size in bytes of the coremem data allocation. */
	u32 coremem_data_size;
	u32 flags;
	/* Padding to keep the structure size/alignment stable. */
	u32 reserved;
};
65 
66 static int
67 pvr_riscv_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
68 		     u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
69 		     u32 core_code_alloc_size)
70 {
71 	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
72 	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
73 	struct rogue_riscv_fw_boot_data *boot_data;
74 	int err;
75 
76 	err = pvr_fw_process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr,
77 						fw_core_code_ptr, fw_core_data_ptr);
78 	if (err)
79 		goto err_out;
80 
81 	boot_data = (struct rogue_riscv_fw_boot_data *)fw_data_ptr;
82 
83 	if (fw_mem->core_code_obj) {
84 		boot_data->coremem_code_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_code_obj);
85 		pvr_fw_object_get_fw_addr(fw_mem->core_code_obj, &boot_data->coremem_code_fw_addr);
86 		boot_data->coremem_code_size = pvr_fw_obj_get_object_size(fw_mem->core_code_obj);
87 	}
88 
89 	if (fw_mem->core_data_obj) {
90 		boot_data->coremem_data_dev_vaddr = pvr_fw_obj_get_gpu_addr(fw_mem->core_data_obj);
91 		pvr_fw_object_get_fw_addr(fw_mem->core_data_obj, &boot_data->coremem_data_fw_addr);
92 		boot_data->coremem_data_size = pvr_fw_obj_get_object_size(fw_mem->core_data_obj);
93 	}
94 
95 	return 0;
96 
97 err_out:
98 	return err;
99 }
100 
/*
 * Describe the firmware heap for the RISC-V wrapper: a 32 MB heap with no
 * reserved config region (offset 0). Always succeeds.
 */
static int
pvr_riscv_init(struct pvr_device *pvr_dev)
{
	pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_RISCV_SHIFT, 0);

	return 0;
}
108 
109 static u32
110 pvr_riscv_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
111 {
112 	u32 fw_addr = fw_obj->fw_addr_offset + offset;
113 
114 	/* RISC-V cacheability is determined by address. */
115 	if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
116 		fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_UNCACHED_DATA);
117 	else
118 		fw_addr |= ROGUE_RISCVFW_REGION_BASE(SHARED_CACHED_DATA);
119 
120 	return fw_addr;
121 }
122 
123 static int
124 pvr_riscv_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
125 {
126 	struct pvr_gem_object *pvr_obj = fw_obj->gem;
127 
128 	return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start,
129 			  pvr_gem_object_size(pvr_obj));
130 }
131 
132 static void
133 pvr_riscv_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
134 {
135 	struct pvr_gem_object *pvr_obj = fw_obj->gem;
136 
137 	pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
138 			 fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
139 }
140 
141 static bool
142 pvr_riscv_irq_pending(struct pvr_device *pvr_dev)
143 {
144 	return pvr_cr_read32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_STATUS) &
145 	       ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN;
146 }
147 
148 static void
149 pvr_riscv_irq_clear(struct pvr_device *pvr_dev)
150 {
151 	pvr_cr_write32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_CLEAR,
152 		       ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN);
153 }
154 
/*
 * Firmware-processor operations for cores with a RISC-V firmware processor.
 * RISC-V firmware does not require a fixed data address, hence
 * has_fixed_data_addr is false.
 */
const struct pvr_fw_defs pvr_fw_defs_riscv = {
	.init = pvr_riscv_init,
	.fw_process = pvr_riscv_fw_process,
	.vm_map = pvr_riscv_vm_map,
	.vm_unmap = pvr_riscv_vm_unmap,
	.get_fw_addr_with_offset = pvr_riscv_get_fw_addr_with_offset,
	.wrapper_init = pvr_riscv_wrapper_init,
	.irq_pending = pvr_riscv_irq_pending,
	.irq_clear = pvr_riscv_irq_clear,
	.has_fixed_data_addr = false,
};
166