// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_ccb.h"
#include "pvr_device.h"
#include "pvr_device_info.h"
#include "pvr_fw.h"
#include "pvr_fw_info.h"
#include "pvr_fw_startstop.h"
#include "pvr_fw_trace.h"
#include "pvr_gem.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif_dev_info.h"
#include "pvr_rogue_heap_config.h"
#include "pvr_vm.h"

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/sizes.h>

#define FW_MAX_SUPPORTED_MAJOR_VERSION 1

#define FW_BOOT_TIMEOUT_USEC 5000000

/* Config heap occupies top 192k of the firmware heap. */
#define PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY SZ_64K
#define PVR_ROGUE_FW_CONFIG_HEAP_SIZE (3 * PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)

/* Main firmware allocations should come from the remainder of the heap. */
#define PVR_ROGUE_FW_MAIN_HEAP_BASE ROGUE_FW_HEAP_BASE

/* Offsets from start of configuration area of FW heap. */
#define PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET 0
#define PVR_ROGUE_FWIF_OSINIT_OFFSET \
	(PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
#define PVR_ROGUE_FWIF_SYSINIT_OFFSET \
	(PVR_ROGUE_FWIF_OSINIT_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)

#define PVR_ROGUE_FAULT_PAGE_SIZE SZ_4K

#define PVR_SYNC_OBJ_SIZE sizeof(u32)

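/**
 * pvr_fw_find_layout_entry() - Find FW layout entry for a given section ID
 * @pvr_dev: Device pointer.
 * @id: Section ID to look up.
 *
 * Returns:
 * * Pointer to the matching entry in the firmware layout table, or
 * * %NULL if the layout table contains no entry with @id.
 */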
const struct pvr_fw_layout_entry *
pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;

	for (u32 entry = 0; entry < num_layout_entries; entry++) {
		if (layout_entries[entry].id == id)
			return &layout_entries[entry];
	}

	return NULL;
}

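/* Find the layout entry covering the FW processor's private data section. */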
static const struct pvr_fw_layout_entry *
pvr_fw_find_private_data(struct pvr_device *pvr_dev)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;

	for (u32 entry = 0; entry < num_layout_entries; entry++) {
		if (layout_entries[entry].id == META_PRIVATE_DATA ||
		    layout_entries[entry].id == MIPS_PRIVATE_DATA ||
		    layout_entries[entry].id == RISCV_PRIVATE_DATA)
			return &layout_entries[entry];
	}

	return NULL;
}

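/* Number of u64 words needed to hold an x-bit device info mask. */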
#define DEV_INFO_MASK_SIZE(x) DIV_ROUND_UP(x, 64)

/**
 * pvr_fw_validate() - Parse firmware header and check compatibility
 * @pvr_dev: Device pointer.
 *
 * Returns:
 * * 0 on success, or
 * * -EINVAL if firmware is incompatible.
 */
static int
pvr_fw_validate(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	const struct firmware *firmware = pvr_dev->fw_dev.firmware;
	const struct pvr_fw_layout_entry *layout_entries;
	const struct pvr_fw_info_header *header;
	const u8 *fw = firmware->data;
	u32 fw_offset = firmware->size - SZ_4K;
	u32 layout_table_size;

	if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE))
		return -EINVAL;

	header = (const struct pvr_fw_info_header *)&fw[fw_offset];

	if (header->info_version != PVR_FW_INFO_VERSION) {
		drm_err(drm_dev, "Unsupported fw info version %u\n",
			header->info_version);
		return -EINVAL;
	}

	if (header->header_len != sizeof(struct pvr_fw_info_header) ||
	    header->layout_entry_size != sizeof(struct pvr_fw_layout_entry) ||
	    header->layout_entry_num > PVR_FW_INFO_MAX_NUM_ENTRIES) {
		drm_err(drm_dev, "FW info format mismatch\n");
		return -EINVAL;
	}

	if (!(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ||
	    header->fw_version_major > FW_MAX_SUPPORTED_MAJOR_VERSION ||
	    header->fw_version_major == 0) {
		drm_err(drm_dev, "Unsupported FW version %u.%u (build: %u%s)\n",
			header->fw_version_major, header->fw_version_minor,
			header->fw_version_build,
			(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ? " OS" : "");
		return -EINVAL;
	}

	if (pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id) != header->bvnc) {
		struct pvr_gpu_id fw_gpu_id;

		packed_bvnc_to_pvr_gpu_id(header->bvnc, &fw_gpu_id);
		drm_err(drm_dev, "FW built for incorrect GPU ID %i.%i.%i.%i (expected %i.%i.%i.%i)\n",
			fw_gpu_id.b, fw_gpu_id.v, fw_gpu_id.n, fw_gpu_id.c,
			pvr_dev->gpu_id.b, pvr_dev->gpu_id.v, pvr_dev->gpu_id.n, pvr_dev->gpu_id.c);
		return -EINVAL;
	}

	fw_offset += header->header_len;
	layout_table_size =
		header->layout_entry_size * header->layout_entry_num;
	if ((fw_offset + layout_table_size) > firmware->size)
		return -EINVAL;

	layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset];
	for (u32 entry = 0; entry < header->layout_entry_num; entry++) {
		u32 start_addr = layout_entries[entry].base_addr;
		u32 end_addr = start_addr + layout_entries[entry].alloc_size;

		if (start_addr >= end_addr)
			return -EINVAL;
	}

	fw_offset = (firmware->size - SZ_4K) - header->device_info_size;

	drm_info(drm_dev, "FW version v%u.%u (build %u OS)\n", header->fw_version_major,
		 header->fw_version_minor, header->fw_version_build);

	pvr_dev->fw_version.major = header->fw_version_major;
	pvr_dev->fw_version.minor = header->fw_version_minor;

	pvr_dev->fw_dev.header = header;
	pvr_dev->fw_dev.layout_entries = layout_entries;

	return 0;
}

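/**
 * pvr_fw_get_device_info() - Initialise device information from firmware image
 * @pvr_dev: Device pointer.
 *
 * The device info block sits immediately before the FW info header at the end
 * of the image, and carries the BRN (quirk), ERN (enhancement) and feature
 * masks consumed here.
 *
 * Returns:
 * * 0 on success, or
 * * Any error returned by pvr_device_info_set_features().
 */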
static int
pvr_fw_get_device_info(struct pvr_device *pvr_dev)
{
	const struct firmware *firmware = pvr_dev->fw_dev.firmware;
	struct pvr_fw_device_info_header *header;
	const u8 *fw = firmware->data;
	const u64 *dev_info;
	u32 fw_offset;

	fw_offset = (firmware->size - SZ_4K) - pvr_dev->fw_dev.header->device_info_size;

	header = (struct pvr_fw_device_info_header *)&fw[fw_offset];
	dev_info = (u64 *)(header + 1);

	pvr_device_info_set_quirks(pvr_dev, dev_info, header->brn_mask_size);
	dev_info += header->brn_mask_size;

	pvr_device_info_set_enhancements(pvr_dev, dev_info, header->ern_mask_size);
	dev_info += header->ern_mask_size;

	return pvr_device_info_set_features(pvr_dev, dev_info, header->feature_mask_size,
					    header->feature_param_size);
}

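/* Compute the total allocation size of each FW section type. */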
static void
layout_get_sizes(struct pvr_device *pvr_dev)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	fw_mem->code_alloc_size = 0;
	fw_mem->data_alloc_size = 0;
	fw_mem->core_code_alloc_size = 0;
	fw_mem->core_data_alloc_size = 0;

	/* Extract section sizes from FW layout table. */
	for (u32 entry = 0; entry < num_layout_entries; entry++) {
		switch (layout_entries[entry].type) {
		case FW_CODE:
			fw_mem->code_alloc_size += layout_entries[entry].alloc_size;
			break;
		case FW_DATA:
			fw_mem->data_alloc_size += layout_entries[entry].alloc_size;
			break;
		case FW_COREMEM_CODE:
			fw_mem->core_code_alloc_size +=
				layout_entries[entry].alloc_size;
			break;
		case FW_COREMEM_DATA:
			fw_mem->core_data_alloc_size +=
				layout_entries[entry].alloc_size;
			break;
		case NONE:
			break;
		}
	}
}

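/**
 * pvr_fw_find_mmu_segment() - Translate a FW address range to a host pointer
 * @pvr_dev: Device pointer.
 * @addr: Start of range, in FW address space.
 * @size: Size of range, in bytes. Must be non-zero.
 * @fw_code_ptr: Host pointer to start of FW code section.
 * @fw_data_ptr: Host pointer to start of FW data section.
 * @fw_core_code_ptr: Host pointer to start of FW coremem code section.
 * @fw_core_data_ptr: Host pointer to start of FW coremem data section.
 * @host_addr_out: Location to store the translated host pointer.
 *
 * The requested range must fall entirely within a single layout entry.
 *
 * Returns:
 * * 0 on success, or
 * * -EINVAL if the range is empty, overflows, or matches no layout entry.
 */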
int
pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw_code_ptr,
			void *fw_data_ptr, void *fw_core_code_ptr, void *fw_core_data_ptr,
			void **host_addr_out)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
	u32 end_addr = addr + size;

	/* Ensure requested range is not zero, and size is not causing addr to overflow. */
	if (end_addr <= addr)
		return -EINVAL;

	for (int entry = 0; entry < num_layout_entries; entry++) {
		u32 entry_start_addr = layout_entries[entry].base_addr;
		u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size;

		if (addr >= entry_start_addr && addr < entry_end_addr &&
		    end_addr > entry_start_addr && end_addr <= entry_end_addr) {
			switch (layout_entries[entry].type) {
			case FW_CODE:
				*host_addr_out = fw_code_ptr;
				break;

			case FW_DATA:
				*host_addr_out = fw_data_ptr;
				break;

			case FW_COREMEM_CODE:
				*host_addr_out = fw_core_code_ptr;
				break;

			case FW_COREMEM_DATA:
				*host_addr_out = fw_core_data_ptr;
				break;

			default:
				return -EINVAL;
			}
			/* Direct Mem write to mapped memory */
			addr -= layout_entries[entry].base_addr;
			addr += layout_entries[entry].alloc_offset;

			/*
			 * Add offset to pointer to FW allocation only if that
			 * allocation is available.
			 */
			*(u8 **)host_addr_out += addr;
			return 0;
		}
	}

	return -EINVAL;
}

static int
pvr_fw_create_fwif_connection_ctl(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->fwif_connection_ctl =
		pvr_fw_object_create_and_map_offset(pvr_dev,
						    fw_dev->fw_heap_info.config_offset +
						    PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET,
						    sizeof(*fw_dev->fwif_connection_ctl),
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    NULL, NULL,
						    &fw_dev->mem.fwif_connection_ctl_obj);
	if (IS_ERR(fw_dev->fwif_connection_ctl)) {
		drm_err(drm_dev,
			"Unable to allocate FWIF connection control memory\n");
		return PTR_ERR(fw_dev->fwif_connection_ctl);
	}

	return 0;
}

static void
pvr_fw_fini_fwif_connection_ctl(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	pvr_fw_object_unmap_and_destroy(fw_dev->mem.fwif_connection_ctl_obj);
}

static void
fw_osinit_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_osinit *fwif_osinit = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;

	fwif_osinit->kernel_ccbctl_fw_addr = pvr_dev->kccb.ccb.ctrl_fw_addr;
	fwif_osinit->kernel_ccb_fw_addr = pvr_dev->kccb.ccb.ccb_fw_addr;
	pvr_fw_object_get_fw_addr(pvr_dev->kccb.rtn_obj,
				  &fwif_osinit->kernel_ccb_rtn_slots_fw_addr);

	fwif_osinit->firmware_ccbctl_fw_addr = pvr_dev->fwccb.ctrl_fw_addr;
	fwif_osinit->firmware_ccb_fw_addr = pvr_dev->fwccb.ccb_fw_addr;

	fwif_osinit->work_est_firmware_ccbctl_fw_addr = 0;
	fwif_osinit->work_est_firmware_ccb_fw_addr = 0;

	pvr_fw_object_get_fw_addr(fw_mem->hwrinfobuf_obj,
				  &fwif_osinit->rogue_fwif_hwr_info_buf_ctl_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->osdata_obj, &fwif_osinit->fw_os_data_fw_addr);

	fwif_osinit->hwr_debug_dump_limit = 0;

	rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.hw_bvnc);
	rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.fw_bvnc);
}

static void
fw_osdata_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_osdata *fwif_osdata = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	pvr_fw_object_get_fw_addr(fw_mem->power_sync_obj, &fwif_osdata->power_sync_fw_addr);
}

static void
fw_fault_page_init(void *cpu_ptr, void *priv)
{
	u32 *fault_page = cpu_ptr;

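	/* Fill the page with a recognisable marker so stray FW accesses stand out. */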
	for (int i = 0; i < PVR_ROGUE_FAULT_PAGE_SIZE / sizeof(*fault_page); i++)
		fault_page[i] = 0xdeadbee0;
}

static void
fw_sysinit_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_sysinit *fwif_sysinit = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
	dma_addr_t fault_dma_addr = 0;
	u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);

	WARN_ON(!clock_speed_hz);

	WARN_ON(pvr_fw_object_get_dma_addr(fw_mem->fault_page_obj, 0, &fault_dma_addr));
	fwif_sysinit->fault_phys_addr = (u64)fault_dma_addr;

	fwif_sysinit->pds_exec_base = ROGUE_PDSCODEDATA_HEAP_BASE;
	fwif_sysinit->usc_exec_base = ROGUE_USCCODE_HEAP_BASE;

	pvr_fw_object_get_fw_addr(fw_mem->runtime_cfg_obj, &fwif_sysinit->runtime_cfg_fw_addr);
	pvr_fw_object_get_fw_addr(fw_dev->fw_trace.tracebuf_ctrl_obj,
				  &fwif_sysinit->trace_buf_ctl_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->sysdata_obj, &fwif_sysinit->fw_sys_data_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->gpu_util_fwcb_obj,
				  &fwif_sysinit->gpu_util_fw_cb_ctl_fw_addr);
	if (fw_mem->core_data_obj) {
		pvr_fw_object_get_fw_addr(fw_mem->core_data_obj,
					  &fwif_sysinit->coremem_data_store.fw_addr);
	}

	/* Currently unsupported. */
	fwif_sysinit->counter_dump_ctl.buffer_fw_addr = 0;
	fwif_sysinit->counter_dump_ctl.size_in_dwords = 0;

	/* Skip alignment checks. */
	fwif_sysinit->align_checks = 0;

	fwif_sysinit->filter_flags = 0;
	fwif_sysinit->hw_perf_filter = 0;
	fwif_sysinit->firmware_perf = FW_PERF_CONF_NONE;
	fwif_sysinit->initial_core_clock_speed = clock_speed_hz;
	fwif_sysinit->active_pm_latency_ms = 0;
	fwif_sysinit->gpio_validation_mode = ROGUE_FWIF_GPIO_VAL_OFF;
	fwif_sysinit->firmware_started = false;
	fwif_sysinit->marker_val = 1;

	memset(&fwif_sysinit->bvnc_km_feature_flags, 0,
	       sizeof(fwif_sysinit->bvnc_km_feature_flags));
}

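/* Minimum SLC size (in KiB) required for the FW to allow data master overlap. */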
#define ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB 4

static void
fw_sysdata_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_sysdata *fwif_sysdata = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	u32 slc_size_in_kilobytes = 0;
	u32 config_flags = 0;

	WARN_ON(PVR_FEATURE_VALUE(pvr_dev, slc_size_in_kilobytes, &slc_size_in_kilobytes));

	if (slc_size_in_kilobytes < ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB)
		config_flags |= ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP;

	fwif_sysdata->config_flags = config_flags;
}

static void
fw_runtime_cfg_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_runtime_cfg *runtime_cfg = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);

	WARN_ON(!clock_speed_hz);

	runtime_cfg->core_clock_speed = clock_speed_hz;
	runtime_cfg->active_pm_latency_ms = 0;
	runtime_cfg->active_pm_latency_persistant = true;
	WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
				  &runtime_cfg->default_dusts_num_init) != 0);

	/* Keep watchdog timer disabled. */
	runtime_cfg->wdg_period_us = 0;
}

static void
fw_gpu_util_fwcb_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_gpu_util_fwcb *gpu_util_fwcb = cpu_ptr;

	gpu_util_fwcb->last_word = PVR_FWIF_GPU_UTIL_STATE_IDLE;
}

static int
pvr_fw_create_structures(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
	int err;

	fw_dev->power_sync = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->power_sync),
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->power_sync_obj);
	if (IS_ERR(fw_dev->power_sync)) {
		drm_err(drm_dev, "Unable to allocate FW power_sync structure\n");
		return PTR_ERR(fw_dev->power_sync);
	}

	fw_dev->hwrinfobuf = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->hwrinfobuf),
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->hwrinfobuf_obj);
	if (IS_ERR(fw_dev->hwrinfobuf)) {
		drm_err(drm_dev,
			"Unable to allocate FW hwrinfobuf structure\n");
		err = PTR_ERR(fw_dev->hwrinfobuf);
		goto err_release_power_sync;
	}

	err = pvr_fw_object_create(pvr_dev, PVR_SYNC_OBJ_SIZE,
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   NULL, NULL, &fw_mem->mmucache_sync_obj);
	if (err) {
		drm_err(drm_dev,
			"Unable to allocate MMU cache sync object\n");
		goto err_release_hwrinfobuf;
	}

	fw_dev->fwif_sysdata = pvr_fw_object_create_and_map(pvr_dev,
							    sizeof(*fw_dev->fwif_sysdata),
							    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							    fw_sysdata_init, pvr_dev,
							    &fw_mem->sysdata_obj);
	if (IS_ERR(fw_dev->fwif_sysdata)) {
		drm_err(drm_dev, "Unable to allocate FW SYSDATA structure\n");
		err = PTR_ERR(fw_dev->fwif_sysdata);
		goto err_release_mmucache_sync_obj;
	}

	err = pvr_fw_object_create(pvr_dev, PVR_ROGUE_FAULT_PAGE_SIZE,
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_fault_page_init, NULL, &fw_mem->fault_page_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate FW fault page\n");
		goto err_release_sysdata;
	}

	err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_gpu_util_fwcb),
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_gpu_util_fwcb_init, pvr_dev, &fw_mem->gpu_util_fwcb_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate GPU util FWCB\n");
		goto err_release_fault_page;
	}

	err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_runtime_cfg),
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_runtime_cfg_init, pvr_dev, &fw_mem->runtime_cfg_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate FW runtime config\n");
		goto err_release_gpu_util_fwcb;
	}

	err = pvr_fw_trace_init(pvr_dev);
	if (err)
		goto err_release_runtime_cfg;

	fw_dev->fwif_osdata = pvr_fw_object_create_and_map(pvr_dev,
							   sizeof(*fw_dev->fwif_osdata),
							   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							   fw_osdata_init, pvr_dev,
							   &fw_mem->osdata_obj);
	if (IS_ERR(fw_dev->fwif_osdata)) {
		drm_err(drm_dev, "Unable to allocate FW OSDATA structure\n");
		err = PTR_ERR(fw_dev->fwif_osdata);
		goto err_fw_trace_fini;
	}

	fw_dev->fwif_osinit =
		pvr_fw_object_create_and_map_offset(pvr_dev,
						    fw_dev->fw_heap_info.config_offset +
						    PVR_ROGUE_FWIF_OSINIT_OFFSET,
						    sizeof(*fw_dev->fwif_osinit),
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    fw_osinit_init, pvr_dev, &fw_mem->osinit_obj);
	if (IS_ERR(fw_dev->fwif_osinit)) {
		drm_err(drm_dev, "Unable to allocate FW OSINIT structure\n");
		err = PTR_ERR(fw_dev->fwif_osinit);
		goto err_release_osdata;
	}

	fw_dev->fwif_sysinit =
		pvr_fw_object_create_and_map_offset(pvr_dev,
						    fw_dev->fw_heap_info.config_offset +
						    PVR_ROGUE_FWIF_SYSINIT_OFFSET,
						    sizeof(*fw_dev->fwif_sysinit),
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    fw_sysinit_init, pvr_dev, &fw_mem->sysinit_obj);
	if (IS_ERR(fw_dev->fwif_sysinit)) {
		drm_err(drm_dev, "Unable to allocate FW SYSINIT structure\n");
		err = PTR_ERR(fw_dev->fwif_sysinit);
		goto err_release_osinit;
	}

	return 0;

err_release_osinit:
	pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);

err_release_osdata:
	pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);

err_fw_trace_fini:
	pvr_fw_trace_fini(pvr_dev);

err_release_runtime_cfg:
	pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);

err_release_gpu_util_fwcb:
	pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);

err_release_fault_page:
	pvr_fw_object_destroy(fw_mem->fault_page_obj);

err_release_sysdata:
	pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);

err_release_mmucache_sync_obj:
	pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);

err_release_hwrinfobuf:
	pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);

err_release_power_sync:
	pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);

	return err;
}

static void
pvr_fw_destroy_structures(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;

	pvr_fw_trace_fini(pvr_dev);
	pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);
	pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);
	pvr_fw_object_destroy(fw_mem->fault_page_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->sysinit_obj);

	pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);
}

/**
 * pvr_fw_process() - Process firmware image, allocate FW memory and create boot
 * arguments
 * @pvr_dev: Device pointer.
 *
 * Returns:
 * * 0 on success, or
 * * Any error returned by pvr_fw_object_create_and_map_offset(), or
 * * Any error returned by pvr_fw_object_create_and_map().
 */
static int
pvr_fw_process(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
	const u8 *fw = pvr_dev->fw_dev.firmware->data;
	const struct pvr_fw_layout_entry *private_data;
	u8 *fw_code_ptr;
	u8 *fw_data_ptr;
	u8 *fw_core_code_ptr;
	u8 *fw_core_data_ptr;
	int err;

	layout_get_sizes(pvr_dev);

	private_data = pvr_fw_find_private_data(pvr_dev);
	if (!private_data)
		return -EINVAL;

	/* Allocate and map memory for firmware sections. */

	/*
	 * Code allocation must be at the start of the firmware heap, otherwise
	 * firmware processor will be unable to boot.
	 *
	 * This has the useful side-effect that for every other object in the
	 * driver, a firmware address of 0 is invalid.
	 */
	fw_code_ptr = pvr_fw_object_create_and_map_offset(pvr_dev, 0, fw_mem->code_alloc_size,
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->code_obj);
	if (IS_ERR(fw_code_ptr)) {
		drm_err(drm_dev, "Unable to allocate FW code memory\n");
		return PTR_ERR(fw_code_ptr);
	}

	if (pvr_dev->fw_dev.defs->has_fixed_data_addr) {
		u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;

		fw_data_ptr =
			pvr_fw_object_create_and_map_offset(pvr_dev, base_addr,
							    fw_mem->data_alloc_size,
							    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							    NULL, NULL, &fw_mem->data_obj);
	} else {
		fw_data_ptr = pvr_fw_object_create_and_map(pvr_dev, fw_mem->data_alloc_size,
							   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							   NULL, NULL, &fw_mem->data_obj);
	}
	if (IS_ERR(fw_data_ptr)) {
		drm_err(drm_dev, "Unable to allocate FW data memory\n");
		err = PTR_ERR(fw_data_ptr);
		goto err_free_fw_code_obj;
	}

	/* Core code and data sections are optional. */
	if (fw_mem->core_code_alloc_size) {
		fw_core_code_ptr =
			pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_code_alloc_size,
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						     NULL, NULL, &fw_mem->core_code_obj);
		if (IS_ERR(fw_core_code_ptr)) {
			drm_err(drm_dev,
				"Unable to allocate FW core code memory\n");
			err = PTR_ERR(fw_core_code_ptr);
			goto err_free_fw_data_obj;
		}
	} else {
		fw_core_code_ptr = NULL;
	}

	if (fw_mem->core_data_alloc_size) {
		fw_core_data_ptr =
			pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_data_alloc_size,
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						     NULL, NULL, &fw_mem->core_data_obj);
		if (IS_ERR(fw_core_data_ptr)) {
			drm_err(drm_dev,
				"Unable to allocate FW core data memory\n");
			err = PTR_ERR(fw_core_data_ptr);
			goto err_free_fw_core_code_obj;
		}
	} else {
		fw_core_data_ptr = NULL;
	}

	fw_mem->code = kzalloc(fw_mem->code_alloc_size, GFP_KERNEL);
	fw_mem->data = kzalloc(fw_mem->data_alloc_size, GFP_KERNEL);
	if (fw_mem->core_code_alloc_size)
		fw_mem->core_code = kzalloc(fw_mem->core_code_alloc_size, GFP_KERNEL);
	if (fw_mem->core_data_alloc_size)
		fw_mem->core_data = kzalloc(fw_mem->core_data_alloc_size, GFP_KERNEL);

	if (!fw_mem->code || !fw_mem->data ||
	    (!fw_mem->core_code && fw_mem->core_code_alloc_size) ||
	    (!fw_mem->core_data && fw_mem->core_data_alloc_size)) {
		err = -ENOMEM;
		goto err_free_kdata;
	}

	err = pvr_dev->fw_dev.defs->fw_process(pvr_dev, fw,
					       fw_mem->code, fw_mem->data, fw_mem->core_code,
					       fw_mem->core_data, fw_mem->core_code_alloc_size);

	if (err)
		goto err_free_kdata;

	memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
	memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
	if (fw_mem->core_code)
		memcpy(fw_core_code_ptr, fw_mem->core_code, fw_mem->core_code_alloc_size);
	if (fw_mem->core_data)
		memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);

	/* We're finished with the firmware section memory on the CPU, unmap. */
	if (fw_core_data_ptr) {
		pvr_fw_object_vunmap(fw_mem->core_data_obj);
		fw_core_data_ptr = NULL;
	}
	if (fw_core_code_ptr) {
		pvr_fw_object_vunmap(fw_mem->core_code_obj);
		fw_core_code_ptr = NULL;
	}
	pvr_fw_object_vunmap(fw_mem->data_obj);
	fw_data_ptr = NULL;
	pvr_fw_object_vunmap(fw_mem->code_obj);
	fw_code_ptr = NULL;

	err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
	if (err)
		goto err_free_kdata;

	return 0;

err_free_kdata:
	kfree(fw_mem->core_data);
	kfree(fw_mem->core_code);
	kfree(fw_mem->data);
	kfree(fw_mem->code);

	if (fw_core_data_ptr)
		pvr_fw_object_vunmap(fw_mem->core_data_obj);
	if (fw_mem->core_data_obj)
		pvr_fw_object_destroy(fw_mem->core_data_obj);

err_free_fw_core_code_obj:
	if (fw_core_code_ptr)
		pvr_fw_object_vunmap(fw_mem->core_code_obj);
	if (fw_mem->core_code_obj)
		pvr_fw_object_destroy(fw_mem->core_code_obj);

err_free_fw_data_obj:
	if (fw_data_ptr)
		pvr_fw_object_vunmap(fw_mem->data_obj);
	pvr_fw_object_destroy(fw_mem->data_obj);

err_free_fw_code_obj:
	if (fw_code_ptr)
		pvr_fw_object_vunmap(fw_mem->code_obj);
	pvr_fw_object_destroy(fw_mem->code_obj);

	return err;
}

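/* Copy size bytes from the kernel buffer src_ptr into dest_obj's CPU mapping. */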
static int
pvr_copy_to_fw(struct pvr_fw_object *dest_obj, u8 *src_ptr, u32 size)
{
	u8 *dest_ptr = pvr_fw_object_vmap(dest_obj);

	if (IS_ERR(dest_ptr))
		return PTR_ERR(dest_ptr);

	memcpy(dest_ptr, src_ptr, size);

	pvr_fw_object_vunmap(dest_obj);

	return 0;
}

static int
pvr_fw_reinit_code_data(struct pvr_device *pvr_dev)
{
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
	int err;

	err = pvr_copy_to_fw(fw_mem->code_obj, fw_mem->code, fw_mem->code_alloc_size);
	if (err)
		return err;

	err = pvr_copy_to_fw(fw_mem->data_obj, fw_mem->data, fw_mem->data_alloc_size);
	if (err)
		return err;

	if (fw_mem->core_code) {
		err = pvr_copy_to_fw(fw_mem->core_code_obj, fw_mem->core_code,
				     fw_mem->core_code_alloc_size);
		if (err)
			return err;
	}

	if (fw_mem->core_data) {
		err = pvr_copy_to_fw(fw_mem->core_data_obj, fw_mem->core_data,
				     fw_mem->core_data_alloc_size);
		if (err)
			return err;
	}

	return 0;
}

static void
pvr_fw_cleanup(struct pvr_device *pvr_dev)
{
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	pvr_fw_fini_fwif_connection_ctl(pvr_dev);

	kfree(fw_mem->core_data);
	kfree(fw_mem->core_code);
	kfree(fw_mem->data);
	kfree(fw_mem->code);

	if (fw_mem->core_code_obj)
		pvr_fw_object_destroy(fw_mem->core_code_obj);
	if (fw_mem->core_data_obj)
		pvr_fw_object_destroy(fw_mem->core_data_obj);
	pvr_fw_object_destroy(fw_mem->code_obj);
	pvr_fw_object_destroy(fw_mem->data_obj);
}

/**
 * pvr_wait_for_fw_boot() - Wait for firmware to finish booting
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 * * 0 on success, or
 * * -%ETIMEDOUT if firmware fails to boot within timeout.
 */
int
pvr_wait_for_fw_boot(struct pvr_device *pvr_dev)
{
	ktime_t deadline = ktime_add_us(ktime_get(), FW_BOOT_TIMEOUT_USEC);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	while (ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) {
		if (READ_ONCE(fw_dev->fwif_sysinit->firmware_started))
			return 0;
	}

	return -ETIMEDOUT;
}

/**
 * pvr_fw_heap_info_init() - Calculate size and masks for FW heap
 * @pvr_dev: Target PowerVR device.
 * @log2_size: Log2 of raw heap size.
 * @reserved_size: Size of reserved area of heap, in bytes. May be zero.
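 *
 * As an illustration (values chosen for the example, not taken from any
 * particular GPU): with @log2_size = 25 and @reserved_size = 0, the raw heap
 * is 32 MiB, offset_mask is 0x1ffffff, and the config area starts
 * %PVR_ROGUE_FW_CONFIG_HEAP_SIZE (192 KiB) below the top of the heap.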
 */
void
pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->fw_heap_info.gpu_addr = PVR_ROGUE_FW_MAIN_HEAP_BASE;
	fw_dev->fw_heap_info.log2_size = log2_size;
	fw_dev->fw_heap_info.reserved_size = reserved_size;
	fw_dev->fw_heap_info.raw_size = 1 << fw_dev->fw_heap_info.log2_size;
	fw_dev->fw_heap_info.offset_mask = fw_dev->fw_heap_info.raw_size - 1;
	fw_dev->fw_heap_info.config_offset = fw_dev->fw_heap_info.raw_size -
					     PVR_ROGUE_FW_CONFIG_HEAP_SIZE;
	fw_dev->fw_heap_info.size = fw_dev->fw_heap_info.raw_size -
				    (PVR_ROGUE_FW_CONFIG_HEAP_SIZE + reserved_size);
}

/**
 * pvr_fw_validate_init_device_info() - Validate firmware and initialise device information
 * @pvr_dev: Target PowerVR device.
 *
 * This function must be called before querying device information.
 *
 * Returns:
 * * 0 on success, or
 * * -%EINVAL if firmware validation fails.
 */
int
pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
{
	int err;

	err = pvr_fw_validate(pvr_dev);
	if (err)
		return err;

	return pvr_fw_get_device_info(pvr_dev);
}

/**
 * pvr_fw_init() - Initialise and boot firmware
 * @pvr_dev: Target PowerVR device
 *
 * On successful completion of the function the PowerVR device will be
 * initialised and ready to use.
 *
 * Returns:
 * * 0 on success,
 * * -%EINVAL on invalid firmware image,
 * * -%ENOMEM on out of memory, or
 * * -%ETIMEDOUT if firmware processor fails to boot or on register poll timeout.
 */
int
pvr_fw_init(struct pvr_device *pvr_dev)
{
	static const struct pvr_fw_defs *fw_defs[PVR_FW_PROCESSOR_TYPE_COUNT] = {
		[PVR_FW_PROCESSOR_TYPE_META] = &pvr_fw_defs_meta,
		[PVR_FW_PROCESSOR_TYPE_MIPS] = &pvr_fw_defs_mips,
		[PVR_FW_PROCESSOR_TYPE_RISCV] = &pvr_fw_defs_riscv,
	};

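	/* One return slot is allocated per KCCB command slot. */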
	u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
	u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	int err;

	if (fw_dev->processor_type >= PVR_FW_PROCESSOR_TYPE_COUNT)
		return -EINVAL;

	fw_dev->defs = fw_defs[fw_dev->processor_type];

	err = fw_dev->defs->init(pvr_dev);
	if (err)
		return err;

	drm_mm_init(&fw_dev->fw_mm, ROGUE_FW_HEAP_BASE, fw_dev->fw_heap_info.raw_size);
	fw_dev->fw_mm_base = ROGUE_FW_HEAP_BASE;
	spin_lock_init(&fw_dev->fw_mm_lock);

	INIT_LIST_HEAD(&fw_dev->fw_objs.list);
	err = drmm_mutex_init(from_pvr_device(pvr_dev), &fw_dev->fw_objs.lock);
	if (err)
		goto err_mm_takedown;

	err = pvr_fw_process(pvr_dev);
	if (err)
		goto err_mm_takedown;

	/* Initialise KCCB and FWCCB. */
	err = pvr_kccb_init(pvr_dev);
	if (err)
		goto err_fw_cleanup;

	err = pvr_fwccb_init(pvr_dev);
	if (err)
		goto err_kccb_fini;

	/* Allocate memory for KCCB return slots. */
	pvr_dev->kccb.rtn = pvr_fw_object_create_and_map(pvr_dev, kccb_rtn_size,
							 PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							 NULL, NULL, &pvr_dev->kccb.rtn_obj);
	if (IS_ERR(pvr_dev->kccb.rtn)) {
		err = PTR_ERR(pvr_dev->kccb.rtn);
		goto err_fwccb_fini;
	}

	err = pvr_fw_create_structures(pvr_dev);
	if (err)
		goto err_kccb_rtn_release;

	err = pvr_fw_start(pvr_dev);
	if (err)
		goto err_destroy_structures;

	err = pvr_wait_for_fw_boot(pvr_dev);
	if (err) {
		drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
		goto err_fw_stop;
	}

	fw_dev->booted = true;

	return 0;

err_fw_stop:
	pvr_fw_stop(pvr_dev);

err_destroy_structures:
	pvr_fw_destroy_structures(pvr_dev);

err_kccb_rtn_release:
	pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);

err_fwccb_fini:
	pvr_ccb_fini(&pvr_dev->fwccb);

err_kccb_fini:
	pvr_kccb_fini(pvr_dev);

err_fw_cleanup:
	pvr_fw_cleanup(pvr_dev);

err_mm_takedown:
	drm_mm_takedown(&fw_dev->fw_mm);

	if (fw_dev->defs->fini)
		fw_dev->defs->fini(pvr_dev);

	return err;
}

/**
 * pvr_fw_fini() - Shutdown firmware processor and free associated memory
 * @pvr_dev: Target PowerVR device
 */
void
pvr_fw_fini(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->booted = false;

	pvr_fw_destroy_structures(pvr_dev);
	pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);

	/*
	 * Ensure FWCCB worker has finished executing before destroying FWCCB. The IRQ handler has
	 * been unregistered at this point so no new work should be being submitted.
	 */
	pvr_ccb_fini(&pvr_dev->fwccb);
	pvr_kccb_fini(pvr_dev);
	pvr_fw_cleanup(pvr_dev);

	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	WARN_ON(!list_empty(&pvr_dev->fw_dev.fw_objs.list));
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	drm_mm_takedown(&fw_dev->fw_mm);

	if (fw_dev->defs->fini)
		fw_dev->defs->fini(pvr_dev);
}

/**
 * pvr_fw_mts_schedule() - Schedule work via an MTS kick
 * @pvr_dev: Target PowerVR device
 * @val: Kick mask. Should be a combination of %ROGUE_CR_MTS_SCHEDULE_*
 */
void
pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val)
{
	/* Ensure memory is flushed before kicking MTS. */
	wmb();

	pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_SCHEDULE, val);

	/* Ensure the MTS kick goes through before continuing. */
	mb();
}

/**
 * pvr_fw_structure_cleanup() - Send FW cleanup request for an object
 * @pvr_dev: Target PowerVR device.
 * @type: Type of object to cleanup. Must be one of &enum rogue_fwif_cleanup_type.
 * @fw_obj: Pointer to FW object containing object to cleanup.
 * @offset: Offset within FW object of object to cleanup.
 *
 * Returns:
 * * 0 on success,
 * * -EBUSY if object is busy,
 * * -ETIMEDOUT on timeout, or
 * * -EIO if device is lost.
 */
int
pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj,
			 u32 offset)
{
	struct rogue_fwif_kccb_cmd cmd;
	int slot_nr;
	int idx;
	int err;
	u32 rtn;

	struct rogue_fwif_cleanup_request *cleanup_req = &cmd.cmd_data.cleanup_data;

	down_read(&pvr_dev->reset_sem);

	if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) {
		err = -EIO;
		goto err_up_read;
	}

	cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_CLEANUP;
	cmd.kccb_flags = 0;
	cleanup_req->cleanup_type = type;

	switch (type) {
	case ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.context_fw_addr);
		break;
	case ROGUE_FWIF_CLEANUP_HWRTDATA:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.hwrt_data_fw_addr);
		break;
	case ROGUE_FWIF_CLEANUP_FREELIST:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.freelist_fw_addr);
		break;
	default:
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot_nr);
	if (err)
		goto err_drm_dev_exit;

	err = pvr_kccb_wait_for_completion(pvr_dev, slot_nr, HZ, &rtn);
	if (err)
		goto err_drm_dev_exit;

	if (rtn & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)
		err = -EBUSY;

err_drm_dev_exit:
	drm_dev_exit(idx);

err_up_read:
	up_read(&pvr_dev->reset_sem);

	return err;
}

/**
 * pvr_fw_object_fw_map() - Map a FW object in firmware address space
 * @pvr_dev: Device pointer.
 * @fw_obj: FW object to map.
 * @dev_addr: Desired address in device space, if a specific address is
 * required. 0 otherwise.
 *
 * Returns:
 * * 0 on success, or
 * * -%EINVAL if @fw_obj is already mapped but has no references, or
 * * Any error returned by DRM.
 */
static int
pvr_fw_object_fw_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj, u64 dev_addr)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	int err;

	spin_lock(&fw_dev->fw_mm_lock);

	if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		err = -EINVAL;
		goto err_unlock;
	}

	if (!dev_addr) {
		/*
		 * Allocate from the main heap only (firmware heap minus
		 * config space).
		 */
		err = drm_mm_insert_node_in_range(&fw_dev->fw_mm, &fw_obj->fw_mm_node,
						  gem_obj->size, 0, 0,
						  fw_dev->fw_heap_info.gpu_addr,
						  fw_dev->fw_heap_info.gpu_addr +
						  fw_dev->fw_heap_info.size, 0);
		if (err)
			goto err_unlock;
	} else {
		fw_obj->fw_mm_node.start = dev_addr;
		fw_obj->fw_mm_node.size = gem_obj->size;
		err = drm_mm_reserve_node(&fw_dev->fw_mm, &fw_obj->fw_mm_node);
		if (err)
			goto err_unlock;
	}

	spin_unlock(&fw_dev->fw_mm_lock);

	/* Map object on GPU. */
	err = fw_dev->defs->vm_map(pvr_dev, fw_obj);
	if (err)
		goto err_remove_node;

	fw_obj->fw_addr_offset = (u32)(fw_obj->fw_mm_node.start - fw_dev->fw_mm_base);

	return 0;

err_remove_node:
	spin_lock(&fw_dev->fw_mm_lock);
	drm_mm_remove_node(&fw_obj->fw_mm_node);

err_unlock:
	spin_unlock(&fw_dev->fw_mm_lock);

	return err;
}

/**
 * pvr_fw_object_fw_unmap() - Unmap a previously mapped FW object
 * @fw_obj: FW object to unmap.
 *
 * Returns:
 * * 0 on success, or
 * * -%EINVAL if object is not currently mapped.
 */
static int
pvr_fw_object_fw_unmap(struct pvr_fw_object *fw_obj)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->defs->vm_unmap(pvr_dev, fw_obj);

	spin_lock(&fw_dev->fw_mm_lock);

	if (!drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		spin_unlock(&fw_dev->fw_mm_lock);
		return -EINVAL;
	}

	drm_mm_remove_node(&fw_obj->fw_mm_node);

	spin_unlock(&fw_dev->fw_mm_lock);

	return 0;
}

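/*
 * Common helper for FW object creation: allocate the backing GEM object, map
 * it into the firmware heap (at @dev_addr when non-zero, otherwise anywhere in
 * the main heap), vmap it for the CPU, and run the optional @init callback on
 * the mapping.
 */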
static void *
pvr_fw_object_create_and_map_common(struct pvr_device *pvr_dev, size_t size,
				    u64 flags, u64 dev_addr,
				    void (*init)(void *cpu_ptr, void *priv),
				    void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	struct pvr_fw_object *fw_obj;
	void *cpu_ptr;
	int err;

	/* %DRM_PVR_BO_PM_FW_PROTECT is implicit for FW objects. */
	flags |= DRM_PVR_BO_PM_FW_PROTECT;

	fw_obj = kzalloc(sizeof(*fw_obj), GFP_KERNEL);
	if (!fw_obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fw_obj->node);
	fw_obj->init = init;
	fw_obj->init_priv = init_priv;

	fw_obj->gem = pvr_gem_object_create(pvr_dev, size, flags);
	if (IS_ERR(fw_obj->gem)) {
		err = PTR_ERR(fw_obj->gem);
		fw_obj->gem = NULL;
		goto err_put_object;
	}

	err = pvr_fw_object_fw_map(pvr_dev, fw_obj, dev_addr);
	if (err)
		goto err_put_object;

	cpu_ptr = pvr_fw_object_vmap(fw_obj);
	if (IS_ERR(cpu_ptr)) {
		err = PTR_ERR(cpu_ptr);
		goto err_put_object;
	}

	*fw_obj_out = fw_obj;

	if (fw_obj->init)
		fw_obj->init(cpu_ptr, fw_obj->init_priv);

	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	list_add_tail(&fw_obj->node, &pvr_dev->fw_dev.fw_objs.list);
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	return cpu_ptr;

err_put_object:
	pvr_fw_object_destroy(fw_obj);

	return ERR_PTR(err);
}

/**
 * pvr_fw_object_create() - Create a FW object and map to firmware
 * @pvr_dev: PowerVR device pointer.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Returns:
 * * 0 on success, or
 * * Any error returned by pvr_fw_object_create_and_map_common().
 */
int
pvr_fw_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags,
		     void (*init)(void *cpu_ptr, void *priv), void *init_priv,
		     struct pvr_fw_object **fw_obj_out)
{
	void *cpu_ptr;

	cpu_ptr = pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv,
						      fw_obj_out);
	if (IS_ERR(cpu_ptr))
		return PTR_ERR(cpu_ptr);

	pvr_fw_object_vunmap(*fw_obj_out);

	return 0;
}

/**
 * pvr_fw_object_create_and_map() - Create a FW object and map to firmware and CPU
 * @pvr_dev: PowerVR device pointer.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
 * mapping.
 *
 * Returns:
 * * Pointer to CPU mapping of newly created object, or
 * * Any error returned by pvr_fw_object_create(), or
 * * Any error returned by pvr_fw_object_vmap().
 */
void *
pvr_fw_object_create_and_map(struct pvr_device *pvr_dev, size_t size, u64 flags,
			     void (*init)(void *cpu_ptr, void *priv),
			     void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv,
						   fw_obj_out);
}
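
/*
 * Illustrative usage of the create-and-map pattern; "struct my_fw_struct" and
 * "some_field" are hypothetical stand-ins, not part of this driver:
 *
 *	struct my_fw_struct *p;
 *	struct pvr_fw_object *obj;
 *
 *	p = pvr_fw_object_create_and_map(pvr_dev, sizeof(*p),
 *					 PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
 *					 NULL, NULL, &obj);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *
 *	p->some_field = 0;
 *	pvr_fw_object_vunmap(obj);
 */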

/**
 * pvr_fw_object_create_and_map_offset() - Create a FW object and map to
 * firmware at the provided offset and to the CPU.
 * @pvr_dev: PowerVR device pointer.
 * @dev_offset: Base address of desired FW mapping, offset from start of FW heap.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
 * mapping.
 *
 * Returns:
 * * Pointer to CPU mapping of newly created object, or
 * * Any error returned by pvr_fw_object_create(), or
 * * Any error returned by pvr_fw_object_vmap().
 */
void *
pvr_fw_object_create_and_map_offset(struct pvr_device *pvr_dev,
				    u32 dev_offset, size_t size, u64 flags,
				    void (*init)(void *cpu_ptr, void *priv),
				    void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	u64 dev_addr = pvr_dev->fw_dev.fw_mm_base + dev_offset;

	return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, dev_addr, init, init_priv,
						   fw_obj_out);
}

/**
 * pvr_fw_object_destroy() - Destroy a pvr_fw_object
 * @fw_obj: Pointer to object to destroy.
 */
void pvr_fw_object_destroy(struct pvr_fw_object *fw_obj)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);

	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	list_del(&fw_obj->node);
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		/* If we can't unmap, leak the memory. */
		if (WARN_ON(pvr_fw_object_fw_unmap(fw_obj)))
			return;
	}

	if (fw_obj->gem)
		pvr_gem_object_put(fw_obj->gem);

	kfree(fw_obj);
}

/**
 * pvr_fw_object_get_fw_addr_offset() - Return address of object in firmware address space, with
 * given offset.
 * @fw_obj: Pointer to object.
 * @offset: Desired offset from start of object.
 * @fw_addr_out: Location to store address to.
 */
void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, u32 *fw_addr_out)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(pvr_obj)->dev);

	*fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
}

u64
pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj)
{
	struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	return fw_dev->fw_heap_info.gpu_addr + fw_obj->fw_addr_offset;
}

/**
 * pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
 * structures
 * @pvr_dev: Device pointer
 *
 * If this function returns an error then the caller must regard the device as lost.
 *
 * Returns:
 * * 0 on success, or
 * * Any error returned by pvr_fw_reinit_code_data().
 */
int
pvr_fw_hard_reset(struct pvr_device *pvr_dev)
{
	struct list_head *pos;
	int err;

	/* Reset all FW objects */
	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);

	list_for_each(pos, &pvr_dev->fw_dev.fw_objs.list) {
		struct pvr_fw_object *fw_obj = container_of(pos, struct pvr_fw_object, node);
		void *cpu_ptr = pvr_fw_object_vmap(fw_obj);

		WARN_ON(IS_ERR(cpu_ptr));

		if (!(fw_obj->gem->flags & PVR_BO_FW_NO_CLEAR_ON_RESET)) {
			memset(cpu_ptr, 0, pvr_gem_object_size(fw_obj->gem));

			if (fw_obj->init)
				fw_obj->init(cpu_ptr, fw_obj->init_priv);
		}

		pvr_fw_object_vunmap(fw_obj);
	}

	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	err = pvr_fw_reinit_code_data(pvr_dev);
	if (err)
		return err;

	return 0;
}