// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_ccb.h"
#include "pvr_device.h"
#include "pvr_device_info.h"
#include "pvr_fw.h"
#include "pvr_fw_info.h"
#include "pvr_fw_startstop.h"
#include "pvr_fw_trace.h"
#include "pvr_gem.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif_dev_info.h"
#include "pvr_rogue_heap_config.h"
#include "pvr_vm.h"

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/sizes.h>

#define FW_MAX_SUPPORTED_MAJOR_VERSION 1

#define FW_BOOT_TIMEOUT_USEC 5000000

/* Config heap occupies top 192k of the firmware heap. */
#define PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY SZ_64K
#define PVR_ROGUE_FW_CONFIG_HEAP_SIZE (3 * PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)

/* Main firmware allocations should come from the remainder of the heap. */
#define PVR_ROGUE_FW_MAIN_HEAP_BASE ROGUE_FW_HEAP_BASE

/* Offsets from start of configuration area of FW heap. */
#define PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET 0
#define PVR_ROGUE_FWIF_OSINIT_OFFSET \
	(PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
#define PVR_ROGUE_FWIF_SYSINIT_OFFSET \
	(PVR_ROGUE_FWIF_OSINIT_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
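
/*
 * Resulting layout of the config area (one 64K granule per structure), as an
 * illustrative summary derived from the offsets above:
 *
 *   +0K:   FWIF connection control
 *   +64K:  FWIF OSINIT
 *   +128K: FWIF SYSINIT
 */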

#define PVR_ROGUE_FAULT_PAGE_SIZE SZ_4K

#define PVR_SYNC_OBJ_SIZE sizeof(u32)

const struct pvr_fw_layout_entry *
pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;

	for (u32 entry = 0; entry < num_layout_entries; entry++) {
		if (layout_entries[entry].id == id)
			return &layout_entries[entry];
	}

	return NULL;
}
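
/*
 * Example (illustrative): callers look up a section by its layout ID, e.g.
 * the firmware processor's private data section:
 *
 *   const struct pvr_fw_layout_entry *entry =
 *           pvr_fw_find_layout_entry(pvr_dev, META_PRIVATE_DATA);
 *
 *   if (!entry)
 *           return -EINVAL;
 */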

static const struct pvr_fw_layout_entry *
pvr_fw_find_private_data(struct pvr_device *pvr_dev)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;

	for (u32 entry = 0; entry < num_layout_entries; entry++) {
		if (layout_entries[entry].id == META_PRIVATE_DATA ||
		    layout_entries[entry].id == MIPS_PRIVATE_DATA ||
		    layout_entries[entry].id == RISCV_PRIVATE_DATA)
			return &layout_entries[entry];
	}

	return NULL;
}

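/* Number of u64 mask words needed to hold @x device info bits. */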
#define DEV_INFO_MASK_SIZE(x) DIV_ROUND_UP(x, 64)

/**
 * pvr_fw_validate() - Parse firmware header and check compatibility
 * @pvr_dev: Device pointer.
 *
 * Returns:
 *  * 0 on success, or
 *  * -EINVAL if firmware is incompatible.
 */
static int
pvr_fw_validate(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	const struct firmware *firmware = pvr_dev->fw_dev.firmware;
	const struct pvr_fw_layout_entry *layout_entries;
	const struct pvr_fw_info_header *header;
	const u8 *fw = firmware->data;
	u32 fw_offset = firmware->size - SZ_4K;
	u32 layout_table_size;

	if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE))
		return -EINVAL;

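	/* The FW info header lives at the start of the final 4K block of the image. */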
	header = (const struct pvr_fw_info_header *)&fw[fw_offset];

	if (header->info_version != PVR_FW_INFO_VERSION) {
		drm_err(drm_dev, "Unsupported fw info version %u\n",
			header->info_version);
		return -EINVAL;
	}

	if (header->header_len != sizeof(struct pvr_fw_info_header) ||
	    header->layout_entry_size != sizeof(struct pvr_fw_layout_entry) ||
	    header->layout_entry_num > PVR_FW_INFO_MAX_NUM_ENTRIES) {
		drm_err(drm_dev, "FW info format mismatch\n");
		return -EINVAL;
	}

	if (!(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ||
	    header->fw_version_major > FW_MAX_SUPPORTED_MAJOR_VERSION ||
	    header->fw_version_major == 0) {
		drm_err(drm_dev, "Unsupported FW version %u.%u (build: %u%s)\n",
			header->fw_version_major, header->fw_version_minor,
			header->fw_version_build,
			(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ? " OS" : "");
		return -EINVAL;
	}

	if (pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id) != header->bvnc) {
		struct pvr_gpu_id fw_gpu_id;

		packed_bvnc_to_pvr_gpu_id(header->bvnc, &fw_gpu_id);
		drm_err(drm_dev, "FW built for incorrect GPU ID %i.%i.%i.%i (expected %i.%i.%i.%i)\n",
			fw_gpu_id.b, fw_gpu_id.v, fw_gpu_id.n, fw_gpu_id.c,
			pvr_dev->gpu_id.b, pvr_dev->gpu_id.v, pvr_dev->gpu_id.n, pvr_dev->gpu_id.c);
		return -EINVAL;
	}

	fw_offset += header->header_len;
	layout_table_size =
		header->layout_entry_size * header->layout_entry_num;
	if ((fw_offset + layout_table_size) > firmware->size)
		return -EINVAL;

	layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset];
	for (u32 entry = 0; entry < header->layout_entry_num; entry++) {
		u32 start_addr = layout_entries[entry].base_addr;
		u32 end_addr = start_addr + layout_entries[entry].alloc_size;

		if (start_addr >= end_addr)
			return -EINVAL;
	}

	drm_info(drm_dev, "FW version v%u.%u (build %u OS)\n", header->fw_version_major,
		 header->fw_version_minor, header->fw_version_build);

	pvr_dev->fw_version.major = header->fw_version_major;
	pvr_dev->fw_version.minor = header->fw_version_minor;

	pvr_dev->fw_dev.header = header;
	pvr_dev->fw_dev.layout_entries = layout_entries;

	return 0;
}

static int
pvr_fw_get_device_info(struct pvr_device *pvr_dev)
{
	const struct firmware *firmware = pvr_dev->fw_dev.firmware;
	struct pvr_fw_device_info_header *header;
	const u8 *fw = firmware->data;
	const u64 *dev_info;
	u32 fw_offset;

	fw_offset = (firmware->size - SZ_4K) - pvr_dev->fw_dev.header->device_info_size;

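	/*
	 * The device info block sits immediately below the FW info header at
	 * the tail of the image: a header followed by the BRN (quirk) mask
	 * words, then the ERN (enhancement) mask words, then the feature
	 * masks and feature parameters.
	 */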
	header = (struct pvr_fw_device_info_header *)&fw[fw_offset];
	dev_info = (u64 *)(header + 1);

	pvr_device_info_set_quirks(pvr_dev, dev_info, header->brn_mask_size);
	dev_info += header->brn_mask_size;

	pvr_device_info_set_enhancements(pvr_dev, dev_info, header->ern_mask_size);
	dev_info += header->ern_mask_size;

	return pvr_device_info_set_features(pvr_dev, dev_info, header->feature_mask_size,
					    header->feature_param_size);
}

static void
layout_get_sizes(struct pvr_device *pvr_dev)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	fw_mem->code_alloc_size = 0;
	fw_mem->data_alloc_size = 0;
	fw_mem->core_code_alloc_size = 0;
	fw_mem->core_data_alloc_size = 0;

	/* Extract section sizes from FW layout table. */
	for (u32 entry = 0; entry < num_layout_entries; entry++) {
		switch (layout_entries[entry].type) {
		case FW_CODE:
			fw_mem->code_alloc_size += layout_entries[entry].alloc_size;
			break;
		case FW_DATA:
			fw_mem->data_alloc_size += layout_entries[entry].alloc_size;
			break;
		case FW_COREMEM_CODE:
			fw_mem->core_code_alloc_size +=
				layout_entries[entry].alloc_size;
			break;
		case FW_COREMEM_DATA:
			fw_mem->core_data_alloc_size +=
				layout_entries[entry].alloc_size;
			break;
		case NONE:
			break;
		}
	}
}

int
pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw_code_ptr,
			void *fw_data_ptr, void *fw_core_code_ptr, void *fw_core_data_ptr,
			void **host_addr_out)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
	u32 end_addr = addr + size;

	/* Ensure requested range is not zero, and size is not causing addr to overflow. */
	if (end_addr <= addr)
		return -EINVAL;

	for (u32 entry = 0; entry < num_layout_entries; entry++) {
		u32 entry_start_addr = layout_entries[entry].base_addr;
		u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size;

		if (addr >= entry_start_addr && addr < entry_end_addr &&
		    end_addr > entry_start_addr && end_addr <= entry_end_addr) {
			switch (layout_entries[entry].type) {
			case FW_CODE:
				*host_addr_out = fw_code_ptr;
				break;

			case FW_DATA:
				*host_addr_out = fw_data_ptr;
				break;

			case FW_COREMEM_CODE:
				*host_addr_out = fw_core_code_ptr;
				break;

			case FW_COREMEM_DATA:
				*host_addr_out = fw_core_data_ptr;
				break;

			default:
				return -EINVAL;
			}
			/* Convert the FW address to an offset within the mapped allocation. */
			addr -= layout_entries[entry].base_addr;
			addr += layout_entries[entry].alloc_offset;

			/*
			 * Advance the host pointer for the matching FW
			 * allocation by that offset.
			 */
			*(u8 **)host_addr_out += addr;
			return 0;
		}
	}

	return -EINVAL;
}

static int
pvr_fw_create_fwif_connection_ctl(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->fwif_connection_ctl =
		pvr_fw_object_create_and_map_offset(pvr_dev,
						    fw_dev->fw_heap_info.config_offset +
						    PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET,
						    sizeof(*fw_dev->fwif_connection_ctl),
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    NULL, NULL,
						    &fw_dev->mem.fwif_connection_ctl_obj);
	if (IS_ERR(fw_dev->fwif_connection_ctl)) {
		drm_err(drm_dev,
			"Unable to allocate FWIF connection control memory\n");
		return PTR_ERR(fw_dev->fwif_connection_ctl);
	}

	return 0;
}

static void
pvr_fw_fini_fwif_connection_ctl(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	pvr_fw_object_unmap_and_destroy(fw_dev->mem.fwif_connection_ctl_obj);
}

static void
fw_osinit_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_osinit *fwif_osinit = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;

	fwif_osinit->kernel_ccbctl_fw_addr = pvr_dev->kccb.ccb.ctrl_fw_addr;
	fwif_osinit->kernel_ccb_fw_addr = pvr_dev->kccb.ccb.ccb_fw_addr;
	pvr_fw_object_get_fw_addr(pvr_dev->kccb.rtn_obj,
				  &fwif_osinit->kernel_ccb_rtn_slots_fw_addr);

	fwif_osinit->firmware_ccbctl_fw_addr = pvr_dev->fwccb.ctrl_fw_addr;
	fwif_osinit->firmware_ccb_fw_addr = pvr_dev->fwccb.ccb_fw_addr;

	fwif_osinit->work_est_firmware_ccbctl_fw_addr = 0;
	fwif_osinit->work_est_firmware_ccb_fw_addr = 0;

	pvr_fw_object_get_fw_addr(fw_mem->hwrinfobuf_obj,
				  &fwif_osinit->rogue_fwif_hwr_info_buf_ctl_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->osdata_obj, &fwif_osinit->fw_os_data_fw_addr);

	fwif_osinit->hwr_debug_dump_limit = 0;

	rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.hw_bvnc);
	rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.fw_bvnc);
}

static void
fw_osdata_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_osdata *fwif_osdata = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	pvr_fw_object_get_fw_addr(fw_mem->power_sync_obj, &fwif_osdata->power_sync_fw_addr);
}

static void
fw_fault_page_init(void *cpu_ptr, void *priv)
{
	u32 *fault_page = cpu_ptr;

	for (int i = 0; i < PVR_ROGUE_FAULT_PAGE_SIZE / sizeof(*fault_page); i++)
		fault_page[i] = 0xdeadbee0;
}

static void
fw_sysinit_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_sysinit *fwif_sysinit = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
	dma_addr_t fault_dma_addr = 0;
	u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);

	WARN_ON(!clock_speed_hz);

	WARN_ON(pvr_fw_object_get_dma_addr(fw_mem->fault_page_obj, 0, &fault_dma_addr));
	fwif_sysinit->fault_phys_addr = (u64)fault_dma_addr;

	fwif_sysinit->pds_exec_base = ROGUE_PDSCODEDATA_HEAP_BASE;
	fwif_sysinit->usc_exec_base = ROGUE_USCCODE_HEAP_BASE;

	pvr_fw_object_get_fw_addr(fw_mem->runtime_cfg_obj, &fwif_sysinit->runtime_cfg_fw_addr);
	pvr_fw_object_get_fw_addr(fw_dev->fw_trace.tracebuf_ctrl_obj,
				  &fwif_sysinit->trace_buf_ctl_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->sysdata_obj, &fwif_sysinit->fw_sys_data_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->gpu_util_fwcb_obj,
				  &fwif_sysinit->gpu_util_fw_cb_ctl_fw_addr);
	if (fw_mem->core_data_obj) {
		pvr_fw_object_get_fw_addr(fw_mem->core_data_obj,
					  &fwif_sysinit->coremem_data_store.fw_addr);
	}

	/* Currently unsupported. */
	fwif_sysinit->counter_dump_ctl.buffer_fw_addr = 0;
	fwif_sysinit->counter_dump_ctl.size_in_dwords = 0;

	/* Skip alignment checks. */
	fwif_sysinit->align_checks = 0;

	fwif_sysinit->filter_flags = 0;
	fwif_sysinit->hw_perf_filter = 0;
	fwif_sysinit->firmware_perf = FW_PERF_CONF_NONE;
	fwif_sysinit->initial_core_clock_speed = clock_speed_hz;
	fwif_sysinit->active_pm_latency_ms = 0;
	fwif_sysinit->gpio_validation_mode = ROGUE_FWIF_GPIO_VAL_OFF;
	fwif_sysinit->firmware_started = false;
	fwif_sysinit->marker_val = 1;

	memset(&fwif_sysinit->bvnc_km_feature_flags, 0,
	       sizeof(fwif_sysinit->bvnc_km_feature_flags));
}

#define ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB 4

static void
fw_sysdata_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_sysdata *fwif_sysdata = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	u32 slc_size_in_kilobytes = 0;
	u32 config_flags = 0;

	WARN_ON(PVR_FEATURE_VALUE(pvr_dev, slc_size_in_kilobytes, &slc_size_in_kilobytes));

	if (slc_size_in_kilobytes < ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB)
		config_flags |= ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP;

	fwif_sysdata->config_flags = config_flags;
}

static void
fw_runtime_cfg_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_runtime_cfg *runtime_cfg = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);

	WARN_ON(!clock_speed_hz);

	runtime_cfg->core_clock_speed = clock_speed_hz;
	runtime_cfg->active_pm_latency_ms = 0;
	runtime_cfg->active_pm_latency_persistant = true;
	WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
				  &runtime_cfg->default_dusts_num_init) != 0);
}

static void
fw_gpu_util_fwcb_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_gpu_util_fwcb *gpu_util_fwcb = cpu_ptr;

	gpu_util_fwcb->last_word = PVR_FWIF_GPU_UTIL_STATE_IDLE;
}

static int
pvr_fw_create_structures(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
	int err;

	fw_dev->power_sync = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->power_sync),
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->power_sync_obj);
	if (IS_ERR(fw_dev->power_sync)) {
		drm_err(drm_dev, "Unable to allocate FW power_sync structure\n");
		return PTR_ERR(fw_dev->power_sync);
	}

	fw_dev->hwrinfobuf = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->hwrinfobuf),
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->hwrinfobuf_obj);
	if (IS_ERR(fw_dev->hwrinfobuf)) {
		drm_err(drm_dev,
			"Unable to allocate FW hwrinfobuf structure\n");
		err = PTR_ERR(fw_dev->hwrinfobuf);
		goto err_release_power_sync;
	}

	err = pvr_fw_object_create(pvr_dev, PVR_SYNC_OBJ_SIZE,
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   NULL, NULL, &fw_mem->mmucache_sync_obj);
	if (err) {
		drm_err(drm_dev,
			"Unable to allocate MMU cache sync object\n");
		goto err_release_hwrinfobuf;
	}

	fw_dev->fwif_sysdata = pvr_fw_object_create_and_map(pvr_dev,
							    sizeof(*fw_dev->fwif_sysdata),
							    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							    fw_sysdata_init, pvr_dev,
							    &fw_mem->sysdata_obj);
	if (IS_ERR(fw_dev->fwif_sysdata)) {
		drm_err(drm_dev, "Unable to allocate FW SYSDATA structure\n");
		err = PTR_ERR(fw_dev->fwif_sysdata);
		goto err_release_mmucache_sync_obj;
	}

	err = pvr_fw_object_create(pvr_dev, PVR_ROGUE_FAULT_PAGE_SIZE,
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_fault_page_init, NULL, &fw_mem->fault_page_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate FW fault page\n");
		goto err_release_sysdata;
	}

	err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_gpu_util_fwcb),
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_gpu_util_fwcb_init, pvr_dev, &fw_mem->gpu_util_fwcb_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate GPU util FWCB\n");
		goto err_release_fault_page;
	}

	err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_runtime_cfg),
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_runtime_cfg_init, pvr_dev, &fw_mem->runtime_cfg_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate FW runtime config\n");
		goto err_release_gpu_util_fwcb;
	}

	err = pvr_fw_trace_init(pvr_dev);
	if (err)
		goto err_release_runtime_cfg;

	fw_dev->fwif_osdata = pvr_fw_object_create_and_map(pvr_dev,
							   sizeof(*fw_dev->fwif_osdata),
							   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							   fw_osdata_init, pvr_dev,
							   &fw_mem->osdata_obj);
	if (IS_ERR(fw_dev->fwif_osdata)) {
		drm_err(drm_dev, "Unable to allocate FW OSDATA structure\n");
		err = PTR_ERR(fw_dev->fwif_osdata);
		goto err_fw_trace_fini;
	}

	fw_dev->fwif_osinit =
		pvr_fw_object_create_and_map_offset(pvr_dev,
						    fw_dev->fw_heap_info.config_offset +
						    PVR_ROGUE_FWIF_OSINIT_OFFSET,
						    sizeof(*fw_dev->fwif_osinit),
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    fw_osinit_init, pvr_dev, &fw_mem->osinit_obj);
	if (IS_ERR(fw_dev->fwif_osinit)) {
		drm_err(drm_dev, "Unable to allocate FW OSINIT structure\n");
		err = PTR_ERR(fw_dev->fwif_osinit);
		goto err_release_osdata;
	}

	fw_dev->fwif_sysinit =
		pvr_fw_object_create_and_map_offset(pvr_dev,
						    fw_dev->fw_heap_info.config_offset +
						    PVR_ROGUE_FWIF_SYSINIT_OFFSET,
						    sizeof(*fw_dev->fwif_sysinit),
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    fw_sysinit_init, pvr_dev, &fw_mem->sysinit_obj);
	if (IS_ERR(fw_dev->fwif_sysinit)) {
		drm_err(drm_dev, "Unable to allocate FW SYSINIT structure\n");
		err = PTR_ERR(fw_dev->fwif_sysinit);
		goto err_release_osinit;
	}

	return 0;

err_release_osinit:
	pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);

err_release_osdata:
	pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);

err_fw_trace_fini:
	pvr_fw_trace_fini(pvr_dev);

err_release_runtime_cfg:
	pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);

err_release_gpu_util_fwcb:
	pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);

err_release_fault_page:
	pvr_fw_object_destroy(fw_mem->fault_page_obj);

err_release_sysdata:
	pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);

err_release_mmucache_sync_obj:
	pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);

err_release_hwrinfobuf:
	pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);

err_release_power_sync:
	pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);

	return err;
}

static void
pvr_fw_destroy_structures(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;

	pvr_fw_trace_fini(pvr_dev);
	pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);
	pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);
	pvr_fw_object_destroy(fw_mem->fault_page_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->sysinit_obj);

	pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);
}

/**
 * pvr_fw_process() - Process firmware image, allocate FW memory and create boot
 *                    arguments
 * @pvr_dev: Device pointer.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_object_create_and_map_offset(), or
 *  * Any error returned by pvr_fw_object_create_and_map().
 */
static int
pvr_fw_process(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
	const u8 *fw = pvr_dev->fw_dev.firmware->data;
	const struct pvr_fw_layout_entry *private_data;
	u8 *fw_code_ptr;
	u8 *fw_data_ptr;
	u8 *fw_core_code_ptr;
	u8 *fw_core_data_ptr;
	int err;

	layout_get_sizes(pvr_dev);

	private_data = pvr_fw_find_private_data(pvr_dev);
	if (!private_data)
		return -EINVAL;

	/* Allocate and map memory for firmware sections. */

	/*
	 * Code allocation must be at the start of the firmware heap, otherwise
	 * firmware processor will be unable to boot.
	 *
	 * This has the useful side-effect that for every other object in the
	 * driver, a firmware address of 0 is invalid.
	 */
	fw_code_ptr = pvr_fw_object_create_and_map_offset(pvr_dev, 0, fw_mem->code_alloc_size,
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->code_obj);
	if (IS_ERR(fw_code_ptr)) {
		drm_err(drm_dev, "Unable to allocate FW code memory\n");
		return PTR_ERR(fw_code_ptr);
	}

	if (pvr_dev->fw_dev.defs->has_fixed_data_addr()) {
		u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;

		fw_data_ptr =
			pvr_fw_object_create_and_map_offset(pvr_dev, base_addr,
							    fw_mem->data_alloc_size,
							    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							    NULL, NULL, &fw_mem->data_obj);
	} else {
		fw_data_ptr = pvr_fw_object_create_and_map(pvr_dev, fw_mem->data_alloc_size,
							   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							   NULL, NULL, &fw_mem->data_obj);
	}
	if (IS_ERR(fw_data_ptr)) {
		drm_err(drm_dev, "Unable to allocate FW data memory\n");
		err = PTR_ERR(fw_data_ptr);
		goto err_free_fw_code_obj;
	}

	/* Core code and data sections are optional. */
	if (fw_mem->core_code_alloc_size) {
		fw_core_code_ptr =
			pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_code_alloc_size,
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						     NULL, NULL, &fw_mem->core_code_obj);
		if (IS_ERR(fw_core_code_ptr)) {
			drm_err(drm_dev,
				"Unable to allocate FW core code memory\n");
			err = PTR_ERR(fw_core_code_ptr);
			goto err_free_fw_data_obj;
		}
	} else {
		fw_core_code_ptr = NULL;
	}

	if (fw_mem->core_data_alloc_size) {
		fw_core_data_ptr =
			pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_data_alloc_size,
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						     NULL, NULL, &fw_mem->core_data_obj);
		if (IS_ERR(fw_core_data_ptr)) {
			drm_err(drm_dev,
				"Unable to allocate FW core data memory\n");
			err = PTR_ERR(fw_core_data_ptr);
			goto err_free_fw_core_code_obj;
		}
	} else {
		fw_core_data_ptr = NULL;
	}

	fw_mem->code = kzalloc(fw_mem->code_alloc_size, GFP_KERNEL);
	fw_mem->data = kzalloc(fw_mem->data_alloc_size, GFP_KERNEL);
	if (fw_mem->core_code_alloc_size)
		fw_mem->core_code = kzalloc(fw_mem->core_code_alloc_size, GFP_KERNEL);
	if (fw_mem->core_data_alloc_size)
		fw_mem->core_data = kzalloc(fw_mem->core_data_alloc_size, GFP_KERNEL);

	if (!fw_mem->code || !fw_mem->data ||
	    (!fw_mem->core_code && fw_mem->core_code_alloc_size) ||
	    (!fw_mem->core_data && fw_mem->core_data_alloc_size)) {
		err = -ENOMEM;
		goto err_free_kdata;
	}

	err = pvr_dev->fw_dev.defs->fw_process(pvr_dev, fw,
					       fw_mem->code, fw_mem->data, fw_mem->core_code,
					       fw_mem->core_data, fw_mem->core_code_alloc_size);

	if (err)
		goto err_free_kdata;

	memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
	memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
	if (fw_mem->core_code)
		memcpy(fw_core_code_ptr, fw_mem->core_code, fw_mem->core_code_alloc_size);
	if (fw_mem->core_data)
		memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);

	/* We're finished with the firmware section memory on the CPU, unmap. */
	if (fw_core_data_ptr) {
		pvr_fw_object_vunmap(fw_mem->core_data_obj);
		fw_core_data_ptr = NULL;
	}
	if (fw_core_code_ptr) {
		pvr_fw_object_vunmap(fw_mem->core_code_obj);
		fw_core_code_ptr = NULL;
	}
	pvr_fw_object_vunmap(fw_mem->data_obj);
	fw_data_ptr = NULL;
	pvr_fw_object_vunmap(fw_mem->code_obj);
	fw_code_ptr = NULL;

	err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
	if (err)
		goto err_free_kdata;

	return 0;

err_free_kdata:
	kfree(fw_mem->core_data);
	kfree(fw_mem->core_code);
	kfree(fw_mem->data);
	kfree(fw_mem->code);

	if (fw_core_data_ptr)
		pvr_fw_object_vunmap(fw_mem->core_data_obj);
	if (fw_mem->core_data_obj)
		pvr_fw_object_destroy(fw_mem->core_data_obj);

err_free_fw_core_code_obj:
	if (fw_core_code_ptr)
		pvr_fw_object_vunmap(fw_mem->core_code_obj);
	if (fw_mem->core_code_obj)
		pvr_fw_object_destroy(fw_mem->core_code_obj);

err_free_fw_data_obj:
	if (fw_data_ptr)
		pvr_fw_object_vunmap(fw_mem->data_obj);
	pvr_fw_object_destroy(fw_mem->data_obj);

err_free_fw_code_obj:
	if (fw_code_ptr)
		pvr_fw_object_vunmap(fw_mem->code_obj);
	pvr_fw_object_destroy(fw_mem->code_obj);

	return err;
}

static int
pvr_copy_to_fw(struct pvr_fw_object *dest_obj, u8 *src_ptr, u32 size)
{
	u8 *dest_ptr = pvr_fw_object_vmap(dest_obj);

	if (IS_ERR(dest_ptr))
		return PTR_ERR(dest_ptr);

	memcpy(dest_ptr, src_ptr, size);

	pvr_fw_object_vunmap(dest_obj);

	return 0;
}

static int
pvr_fw_reinit_code_data(struct pvr_device *pvr_dev)
{
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
	int err;

	err = pvr_copy_to_fw(fw_mem->code_obj, fw_mem->code, fw_mem->code_alloc_size);
	if (err)
		return err;

	err = pvr_copy_to_fw(fw_mem->data_obj, fw_mem->data, fw_mem->data_alloc_size);
	if (err)
		return err;

	if (fw_mem->core_code) {
		err = pvr_copy_to_fw(fw_mem->core_code_obj, fw_mem->core_code,
				     fw_mem->core_code_alloc_size);
		if (err)
			return err;
	}

	if (fw_mem->core_data) {
		err = pvr_copy_to_fw(fw_mem->core_data_obj, fw_mem->core_data,
				     fw_mem->core_data_alloc_size);
		if (err)
			return err;
	}

	return 0;
}

static void
pvr_fw_cleanup(struct pvr_device *pvr_dev)
{
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	pvr_fw_fini_fwif_connection_ctl(pvr_dev);

	kfree(fw_mem->core_data);
	kfree(fw_mem->core_code);
	kfree(fw_mem->data);
	kfree(fw_mem->code);

	if (fw_mem->core_code_obj)
		pvr_fw_object_destroy(fw_mem->core_code_obj);
	if (fw_mem->core_data_obj)
		pvr_fw_object_destroy(fw_mem->core_data_obj);
	pvr_fw_object_destroy(fw_mem->code_obj);
	pvr_fw_object_destroy(fw_mem->data_obj);
}

/**
 * pvr_wait_for_fw_boot() - Wait for firmware to finish booting
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%ETIMEDOUT if firmware fails to boot within timeout.
 */
int
pvr_wait_for_fw_boot(struct pvr_device *pvr_dev)
{
	ktime_t deadline = ktime_add_us(ktime_get(), FW_BOOT_TIMEOUT_USEC);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	while (ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) {
		if (READ_ONCE(fw_dev->fwif_sysinit->firmware_started))
			return 0;
	}

	return -ETIMEDOUT;
}

/**
 * pvr_fw_heap_info_init() - Calculate size and masks for FW heap
 * @pvr_dev: Target PowerVR device.
 * @log2_size: Log2 of raw heap size.
 * @reserved_size: Size of reserved area of heap, in bytes. May be zero.
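 *
 * Worked example (illustrative values only): with @log2_size = 25 the raw
 * heap is 32 MiB, so config_offset = 32 MiB - 192 KiB, and with
 * @reserved_size = 0 the usable main heap size is 32 MiB - 192 KiB.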
 */
void
pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->fw_heap_info.gpu_addr = PVR_ROGUE_FW_MAIN_HEAP_BASE;
	fw_dev->fw_heap_info.log2_size = log2_size;
	fw_dev->fw_heap_info.reserved_size = reserved_size;
	fw_dev->fw_heap_info.raw_size = 1 << fw_dev->fw_heap_info.log2_size;
	fw_dev->fw_heap_info.offset_mask = fw_dev->fw_heap_info.raw_size - 1;
	fw_dev->fw_heap_info.config_offset = fw_dev->fw_heap_info.raw_size -
					     PVR_ROGUE_FW_CONFIG_HEAP_SIZE;
	fw_dev->fw_heap_info.size = fw_dev->fw_heap_info.raw_size -
				    (PVR_ROGUE_FW_CONFIG_HEAP_SIZE + reserved_size);
}

/**
 * pvr_fw_validate_init_device_info() - Validate firmware and initialise device information
 * @pvr_dev: Target PowerVR device.
 *
 * This function must be called before querying device information.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%EINVAL if firmware validation fails.
 */
int
pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
{
	int err;

	err = pvr_fw_validate(pvr_dev);
	if (err)
		return err;

	return pvr_fw_get_device_info(pvr_dev);
}

/**
 * pvr_fw_init() - Initialise and boot firmware
 * @pvr_dev: Target PowerVR device
 *
 * On successful completion of the function the PowerVR device will be
 * initialised and ready to use.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EINVAL on invalid firmware image,
 *  * -%ENOMEM on out of memory, or
 *  * -%ETIMEDOUT if firmware processor fails to boot or on register poll timeout.
 */
int
pvr_fw_init(struct pvr_device *pvr_dev)
{
	u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
	u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	int err;

	if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META)
		fw_dev->defs = &pvr_fw_defs_meta;
	else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS)
		fw_dev->defs = &pvr_fw_defs_mips;
	else
		return -EINVAL;

	err = fw_dev->defs->init(pvr_dev);
	if (err)
		return err;

	drm_mm_init(&fw_dev->fw_mm, ROGUE_FW_HEAP_BASE, fw_dev->fw_heap_info.raw_size);
	fw_dev->fw_mm_base = ROGUE_FW_HEAP_BASE;
	spin_lock_init(&fw_dev->fw_mm_lock);

	INIT_LIST_HEAD(&fw_dev->fw_objs.list);
	err = drmm_mutex_init(from_pvr_device(pvr_dev), &fw_dev->fw_objs.lock);
	if (err)
		goto err_mm_takedown;

	err = pvr_fw_process(pvr_dev);
	if (err)
		goto err_mm_takedown;

	/* Initialise KCCB and FWCCB. */
	err = pvr_kccb_init(pvr_dev);
	if (err)
		goto err_fw_cleanup;

	err = pvr_fwccb_init(pvr_dev);
	if (err)
		goto err_kccb_fini;

	/* Allocate memory for KCCB return slots. */
	pvr_dev->kccb.rtn = pvr_fw_object_create_and_map(pvr_dev, kccb_rtn_size,
							 PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							 NULL, NULL, &pvr_dev->kccb.rtn_obj);
	if (IS_ERR(pvr_dev->kccb.rtn)) {
		err = PTR_ERR(pvr_dev->kccb.rtn);
		goto err_fwccb_fini;
	}

	err = pvr_fw_create_structures(pvr_dev);
	if (err)
		goto err_kccb_rtn_release;

	err = pvr_fw_start(pvr_dev);
	if (err)
		goto err_destroy_structures;

	err = pvr_wait_for_fw_boot(pvr_dev);
	if (err) {
		drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
		goto err_fw_stop;
	}

	fw_dev->booted = true;

	return 0;

err_fw_stop:
	pvr_fw_stop(pvr_dev);

err_destroy_structures:
	pvr_fw_destroy_structures(pvr_dev);

err_kccb_rtn_release:
	pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);

err_fwccb_fini:
	pvr_ccb_fini(&pvr_dev->fwccb);

err_kccb_fini:
	pvr_kccb_fini(pvr_dev);

err_fw_cleanup:
	pvr_fw_cleanup(pvr_dev);

err_mm_takedown:
	drm_mm_takedown(&fw_dev->fw_mm);

	if (fw_dev->defs->fini)
		fw_dev->defs->fini(pvr_dev);

	return err;
}

/**
 * pvr_fw_fini() - Shutdown firmware processor and free associated memory
 * @pvr_dev: Target PowerVR device
 */
void
pvr_fw_fini(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->booted = false;

	pvr_fw_destroy_structures(pvr_dev);
	pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);

	/*
	 * Ensure the FWCCB worker has finished executing before destroying the
	 * FWCCB. The IRQ handler has been unregistered at this point, so no
	 * new work can be submitted.
	 */
	pvr_ccb_fini(&pvr_dev->fwccb);
	pvr_kccb_fini(pvr_dev);
	pvr_fw_cleanup(pvr_dev);

	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	WARN_ON(!list_empty(&pvr_dev->fw_dev.fw_objs.list));
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	drm_mm_takedown(&fw_dev->fw_mm);

	if (fw_dev->defs->fini)
		fw_dev->defs->fini(pvr_dev);
}

/**
 * pvr_fw_mts_schedule() - Schedule work via an MTS kick
 * @pvr_dev: Target PowerVR device
 * @val: Kick mask. Should be a combination of %ROGUE_CR_MTS_SCHEDULE_*
 */
void
pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val)
{
	/* Ensure memory is flushed before kicking MTS. */
	wmb();

	pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_SCHEDULE, val);

	/* Ensure the MTS kick goes through before continuing. */
	mb();
}

/**
 * pvr_fw_structure_cleanup() - Send FW cleanup request for an object
 * @pvr_dev: Target PowerVR device.
 * @type: Type of object to cleanup. Must be one of &enum rogue_fwif_cleanup_type.
 * @fw_obj: Pointer to FW object containing object to cleanup.
 * @offset: Offset within FW object of object to cleanup.
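 *
 * Example (illustrative): requesting cleanup of a freelist object:
 *
 *   err = pvr_fw_structure_cleanup(pvr_dev, ROGUE_FWIF_CLEANUP_FREELIST,
 *                                  fw_obj, 0);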
 *
 * Returns:
 *  * 0 on success,
 *  * -EBUSY if object is busy,
 *  * -ETIMEDOUT on timeout, or
 *  * -EIO if device is lost.
 */
int
pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj,
			 u32 offset)
{
	struct rogue_fwif_kccb_cmd cmd;
	int slot_nr;
	int idx;
	int err;
	u32 rtn;

	struct rogue_fwif_cleanup_request *cleanup_req = &cmd.cmd_data.cleanup_data;

	down_read(&pvr_dev->reset_sem);

	if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) {
		err = -EIO;
		goto err_up_read;
	}

	cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_CLEANUP;
	cmd.kccb_flags = 0;
	cleanup_req->cleanup_type = type;

	switch (type) {
	case ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.context_fw_addr);
		break;
	case ROGUE_FWIF_CLEANUP_HWRTDATA:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.hwrt_data_fw_addr);
		break;
	case ROGUE_FWIF_CLEANUP_FREELIST:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.freelist_fw_addr);
		break;
	default:
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot_nr);
	if (err)
		goto err_drm_dev_exit;

	err = pvr_kccb_wait_for_completion(pvr_dev, slot_nr, HZ, &rtn);
	if (err)
		goto err_drm_dev_exit;

	if (rtn & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)
		err = -EBUSY;

err_drm_dev_exit:
	drm_dev_exit(idx);

err_up_read:
	up_read(&pvr_dev->reset_sem);

	return err;
}

/**
 * pvr_fw_object_fw_map() - Map a FW object in firmware address space
 * @pvr_dev: Device pointer.
 * @fw_obj: FW object to map.
 * @dev_addr: Desired address in device space, if a specific address is
 *            required. 0 otherwise.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%EINVAL if @fw_obj is already mapped, or
 *  * Any error returned by DRM.
 */
static int
pvr_fw_object_fw_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj, u64 dev_addr)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	int err;

	spin_lock(&fw_dev->fw_mm_lock);

	if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		err = -EINVAL;
		goto err_unlock;
	}

	if (!dev_addr) {
		/*
		 * Allocate from the main heap only (firmware heap minus
		 * config space).
		 */
		err = drm_mm_insert_node_in_range(&fw_dev->fw_mm, &fw_obj->fw_mm_node,
						  gem_obj->size, 0, 0,
						  fw_dev->fw_heap_info.gpu_addr,
						  fw_dev->fw_heap_info.gpu_addr +
						  fw_dev->fw_heap_info.size, 0);
		if (err)
			goto err_unlock;
	} else {
		fw_obj->fw_mm_node.start = dev_addr;
		fw_obj->fw_mm_node.size = gem_obj->size;
		err = drm_mm_reserve_node(&fw_dev->fw_mm, &fw_obj->fw_mm_node);
		if (err)
			goto err_unlock;
	}

	spin_unlock(&fw_dev->fw_mm_lock);

	/* Map object on GPU. */
	err = fw_dev->defs->vm_map(pvr_dev, fw_obj);
	if (err)
		goto err_remove_node;

	fw_obj->fw_addr_offset = (u32)(fw_obj->fw_mm_node.start - fw_dev->fw_mm_base);

	return 0;

err_remove_node:
	spin_lock(&fw_dev->fw_mm_lock);
	drm_mm_remove_node(&fw_obj->fw_mm_node);

err_unlock:
	spin_unlock(&fw_dev->fw_mm_lock);

	return err;
}

/**
 * pvr_fw_object_fw_unmap() - Unmap a previously mapped FW object
 * @fw_obj: FW object to unmap.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%EINVAL if object is not currently mapped.
 */
static int
pvr_fw_object_fw_unmap(struct pvr_fw_object *fw_obj)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->defs->vm_unmap(pvr_dev, fw_obj);

	spin_lock(&fw_dev->fw_mm_lock);

	if (!drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		spin_unlock(&fw_dev->fw_mm_lock);
		return -EINVAL;
	}

	drm_mm_remove_node(&fw_obj->fw_mm_node);

	spin_unlock(&fw_dev->fw_mm_lock);

	return 0;
}

static void *
pvr_fw_object_create_and_map_common(struct pvr_device *pvr_dev, size_t size,
				    u64 flags, u64 dev_addr,
				    void (*init)(void *cpu_ptr, void *priv),
				    void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	struct pvr_fw_object *fw_obj;
	void *cpu_ptr;
	int err;

	/* %DRM_PVR_BO_PM_FW_PROTECT is implicit for FW objects. */
	flags |= DRM_PVR_BO_PM_FW_PROTECT;

	fw_obj = kzalloc(sizeof(*fw_obj), GFP_KERNEL);
	if (!fw_obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fw_obj->node);
	fw_obj->init = init;
	fw_obj->init_priv = init_priv;

	fw_obj->gem = pvr_gem_object_create(pvr_dev, size, flags);
	if (IS_ERR(fw_obj->gem)) {
		err = PTR_ERR(fw_obj->gem);
		fw_obj->gem = NULL;
		goto err_put_object;
	}

	err = pvr_fw_object_fw_map(pvr_dev, fw_obj, dev_addr);
	if (err)
		goto err_put_object;

	cpu_ptr = pvr_fw_object_vmap(fw_obj);
	if (IS_ERR(cpu_ptr)) {
		err = PTR_ERR(cpu_ptr);
		goto err_put_object;
	}

	*fw_obj_out = fw_obj;

	if (fw_obj->init)
		fw_obj->init(cpu_ptr, fw_obj->init_priv);

	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	list_add_tail(&fw_obj->node, &pvr_dev->fw_dev.fw_objs.list);
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	return cpu_ptr;

err_put_object:
	pvr_fw_object_destroy(fw_obj);

	return ERR_PTR(err);
}

/**
 * pvr_fw_object_create() - Create a FW object and map to firmware
 * @pvr_dev: PowerVR device pointer.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_object_create_and_map_common().
 */
int
pvr_fw_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags,
		     void (*init)(void *cpu_ptr, void *priv), void *init_priv,
		     struct pvr_fw_object **fw_obj_out)
{
	void *cpu_ptr;

	cpu_ptr = pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv,
						      fw_obj_out);
	if (IS_ERR(cpu_ptr))
		return PTR_ERR(cpu_ptr);

	pvr_fw_object_vunmap(*fw_obj_out);

	return 0;
}

/**
 * pvr_fw_object_create_and_map() - Create a FW object and map to firmware and CPU
 * @pvr_dev: PowerVR device pointer.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
 * mapping.
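 *
 * Example (illustrative, mirroring the usage in pvr_fw_create_structures()):
 *
 *   ptr = pvr_fw_object_create_and_map(pvr_dev, size,
 *                                      PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
 *                                      NULL, NULL, &fw_obj);
 *   if (IS_ERR(ptr))
 *           return PTR_ERR(ptr);
 *   ...
 *   pvr_fw_object_vunmap(fw_obj);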
 *
 * Returns:
 *  * Pointer to CPU mapping of newly created object, or
 *  * Any error returned by pvr_fw_object_create(), or
 *  * Any error returned by pvr_fw_object_vmap().
 */
void *
pvr_fw_object_create_and_map(struct pvr_device *pvr_dev, size_t size, u64 flags,
			     void (*init)(void *cpu_ptr, void *priv),
			     void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv,
						   fw_obj_out);
}

/**
 * pvr_fw_object_create_and_map_offset() - Create a FW object and map to
 * firmware at the provided offset and to the CPU.
 * @pvr_dev: PowerVR device pointer.
 * @dev_offset: Base address of desired FW mapping, offset from start of FW heap.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
 * mapping.
 *
 * Returns:
 *  * Pointer to CPU mapping of newly created object, or
 *  * Any error returned by pvr_fw_object_create(), or
 *  * Any error returned by pvr_fw_object_vmap().
 */
void *
pvr_fw_object_create_and_map_offset(struct pvr_device *pvr_dev,
				    u32 dev_offset, size_t size, u64 flags,
				    void (*init)(void *cpu_ptr, void *priv),
				    void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	u64 dev_addr = pvr_dev->fw_dev.fw_mm_base + dev_offset;

	return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, dev_addr, init, init_priv,
						   fw_obj_out);
}

/**
 * pvr_fw_object_destroy() - Destroy a pvr_fw_object
 * @fw_obj: Pointer to object to destroy.
 */
void pvr_fw_object_destroy(struct pvr_fw_object *fw_obj)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);

	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	list_del(&fw_obj->node);
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		/* If we can't unmap, leak the memory. */
		if (WARN_ON(pvr_fw_object_fw_unmap(fw_obj)))
			return;
	}

	if (fw_obj->gem)
		pvr_gem_object_put(fw_obj->gem);

	kfree(fw_obj);
}

/**
 * pvr_fw_object_get_fw_addr_offset() - Return address of object in firmware address space, with
 * given offset.
 * @fw_obj: Pointer to object.
 * @offset: Desired offset from start of object.
 * @fw_addr_out: Location to store address to.
 */
void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, u32 *fw_addr_out)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(pvr_obj)->dev);

	*fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
}

/**
 * pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
 *                       structures
 * @pvr_dev: Device pointer
 *
 * If this function returns an error then the caller must regard the device as lost.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_reinit_code_data().
 */
int
pvr_fw_hard_reset(struct pvr_device *pvr_dev)
{
	struct list_head *pos;
	int err;

	/* Reset all FW objects */
	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);

	list_for_each(pos, &pvr_dev->fw_dev.fw_objs.list) {
		struct pvr_fw_object *fw_obj = container_of(pos, struct pvr_fw_object, node);
		void *cpu_ptr = pvr_fw_object_vmap(fw_obj);

		WARN_ON(IS_ERR(cpu_ptr));

		if (!(fw_obj->gem->flags & PVR_BO_FW_NO_CLEAR_ON_RESET)) {
			memset(cpu_ptr, 0, pvr_gem_object_size(fw_obj->gem));

			if (fw_obj->init)
				fw_obj->init(cpu_ptr, fw_obj->init_priv);
		}

		pvr_fw_object_vunmap(fw_obj);
	}

	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	err = pvr_fw_reinit_code_data(pvr_dev);
	if (err)
		return err;

	return 0;
}