// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_ccb.h"
#include "pvr_device.h"
#include "pvr_device_info.h"
#include "pvr_fw.h"
#include "pvr_fw_info.h"
#include "pvr_fw_startstop.h"
#include "pvr_fw_trace.h"
#include "pvr_gem.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif_dev_info.h"
#include "pvr_rogue_heap_config.h"
#include "pvr_vm.h"

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_print.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/sizes.h>

/* Highest FW major version this driver understands; enforced in pvr_fw_validate(). */
#define FW_MAX_SUPPORTED_MAJOR_VERSION 1

/* Timeout used by pvr_wait_for_fw_boot(), in microseconds. */
#define FW_BOOT_TIMEOUT_USEC 5000000

/* Config heap occupies top 192k of the firmware heap. */
#define PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY SZ_64K
#define PVR_ROGUE_FW_CONFIG_HEAP_SIZE (3 * PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)

/* Main firmware allocations should come from the remainder of the heap. */
#define PVR_ROGUE_FW_MAIN_HEAP_BASE ROGUE_FW_HEAP_BASE

/* Offsets from start of configuration area of FW heap.
*/ 39 #define PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET 0 40 #define PVR_ROGUE_FWIF_OSINIT_OFFSET \ 41 (PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY) 42 #define PVR_ROGUE_FWIF_SYSINIT_OFFSET \ 43 (PVR_ROGUE_FWIF_OSINIT_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY) 44 45 #define PVR_ROGUE_FAULT_PAGE_SIZE SZ_4K 46 47 #define PVR_SYNC_OBJ_SIZE sizeof(u32) 48 49 const struct pvr_fw_layout_entry * 50 pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id) 51 { 52 const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries; 53 u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num; 54 55 for (u32 entry = 0; entry < num_layout_entries; entry++) { 56 if (layout_entries[entry].id == id) 57 return &layout_entries[entry]; 58 } 59 60 return NULL; 61 } 62 63 static const struct pvr_fw_layout_entry * 64 pvr_fw_find_private_data(struct pvr_device *pvr_dev) 65 { 66 const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries; 67 u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num; 68 69 for (u32 entry = 0; entry < num_layout_entries; entry++) { 70 if (layout_entries[entry].id == META_PRIVATE_DATA || 71 layout_entries[entry].id == MIPS_PRIVATE_DATA || 72 layout_entries[entry].id == RISCV_PRIVATE_DATA) 73 return &layout_entries[entry]; 74 } 75 76 return NULL; 77 } 78 79 #define DEV_INFO_MASK_SIZE(x) DIV_ROUND_UP(x, 64) 80 81 /** 82 * pvr_fw_validate() - Parse firmware header and check compatibility 83 * @pvr_dev: Device pointer. 84 * 85 * Returns: 86 * * 0 on success, or 87 * * -EINVAL if firmware is incompatible. 
88 */ 89 static int 90 pvr_fw_validate(struct pvr_device *pvr_dev) 91 { 92 struct drm_device *drm_dev = from_pvr_device(pvr_dev); 93 const struct firmware *firmware = pvr_dev->fw_dev.firmware; 94 const struct pvr_fw_layout_entry *layout_entries; 95 const struct pvr_fw_info_header *header; 96 const u8 *fw = firmware->data; 97 u32 fw_offset = firmware->size - SZ_4K; 98 u32 layout_table_size; 99 100 if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE)) 101 return -EINVAL; 102 103 header = (const struct pvr_fw_info_header *)&fw[fw_offset]; 104 105 if (header->info_version != PVR_FW_INFO_VERSION) { 106 drm_err(drm_dev, "Unsupported fw info version %u\n", 107 header->info_version); 108 return -EINVAL; 109 } 110 111 if (header->header_len != sizeof(struct pvr_fw_info_header) || 112 header->layout_entry_size != sizeof(struct pvr_fw_layout_entry) || 113 header->layout_entry_num > PVR_FW_INFO_MAX_NUM_ENTRIES) { 114 drm_err(drm_dev, "FW info format mismatch\n"); 115 return -EINVAL; 116 } 117 118 if (!(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) || 119 header->fw_version_major > FW_MAX_SUPPORTED_MAJOR_VERSION || 120 header->fw_version_major == 0) { 121 drm_err(drm_dev, "Unsupported FW version %u.%u (build: %u%s)\n", 122 header->fw_version_major, header->fw_version_minor, 123 header->fw_version_build, 124 (header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ? 
" OS" : ""); 125 return -EINVAL; 126 } 127 128 if (pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id) != header->bvnc) { 129 struct pvr_gpu_id fw_gpu_id; 130 131 packed_bvnc_to_pvr_gpu_id(header->bvnc, &fw_gpu_id); 132 drm_err(drm_dev, "FW built for incorrect GPU ID %i.%i.%i.%i (expected %i.%i.%i.%i)\n", 133 fw_gpu_id.b, fw_gpu_id.v, fw_gpu_id.n, fw_gpu_id.c, 134 pvr_dev->gpu_id.b, pvr_dev->gpu_id.v, pvr_dev->gpu_id.n, pvr_dev->gpu_id.c); 135 return -EINVAL; 136 } 137 138 fw_offset += header->header_len; 139 layout_table_size = 140 header->layout_entry_size * header->layout_entry_num; 141 if ((fw_offset + layout_table_size) > firmware->size) 142 return -EINVAL; 143 144 layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset]; 145 for (u32 entry = 0; entry < header->layout_entry_num; entry++) { 146 u32 start_addr = layout_entries[entry].base_addr; 147 u32 end_addr = start_addr + layout_entries[entry].alloc_size; 148 149 if (start_addr >= end_addr) 150 return -EINVAL; 151 } 152 153 fw_offset = (firmware->size - SZ_4K) - header->device_info_size; 154 155 drm_info(drm_dev, "FW version v%u.%u (build %u OS)\n", header->fw_version_major, 156 header->fw_version_minor, header->fw_version_build); 157 158 pvr_dev->fw_version.major = header->fw_version_major; 159 pvr_dev->fw_version.minor = header->fw_version_minor; 160 161 pvr_dev->fw_dev.header = header; 162 pvr_dev->fw_dev.layout_entries = layout_entries; 163 164 return 0; 165 } 166 167 static int 168 pvr_fw_get_device_info(struct pvr_device *pvr_dev) 169 { 170 const struct firmware *firmware = pvr_dev->fw_dev.firmware; 171 struct pvr_fw_device_info_header *header; 172 const u8 *fw = firmware->data; 173 const u64 *dev_info; 174 u32 fw_offset; 175 176 fw_offset = (firmware->size - SZ_4K) - pvr_dev->fw_dev.header->device_info_size; 177 178 header = (struct pvr_fw_device_info_header *)&fw[fw_offset]; 179 dev_info = (u64 *)(header + 1); 180 181 pvr_device_info_set_quirks(pvr_dev, dev_info, header->brn_mask_size); 182 
	dev_info += header->brn_mask_size;

	pvr_device_info_set_enhancements(pvr_dev, dev_info, header->ern_mask_size);
	dev_info += header->ern_mask_size;

	return pvr_device_info_set_features(pvr_dev, dev_info, header->feature_mask_size,
					    header->feature_param_size);
}

/* Sum the per-type allocation sizes from the FW layout table into fw_mem. */
static void
layout_get_sizes(struct pvr_device *pvr_dev)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	fw_mem->code_alloc_size = 0;
	fw_mem->data_alloc_size = 0;
	fw_mem->core_code_alloc_size = 0;
	fw_mem->core_data_alloc_size = 0;

	/* Extract section sizes from FW layout table. */
	for (u32 entry = 0; entry < num_layout_entries; entry++) {
		switch (layout_entries[entry].type) {
		case FW_CODE:
			fw_mem->code_alloc_size += layout_entries[entry].alloc_size;
			break;
		case FW_DATA:
			fw_mem->data_alloc_size += layout_entries[entry].alloc_size;
			break;
		case FW_COREMEM_CODE:
			fw_mem->core_code_alloc_size +=
				layout_entries[entry].alloc_size;
			break;
		case FW_COREMEM_DATA:
			fw_mem->core_data_alloc_size +=
				layout_entries[entry].alloc_size;
			break;
		case NONE:
			break;
		}
	}
}

/*
 * Translate a FW address range [addr, addr + size) into a host pointer within
 * one of the mapped section buffers supplied by the caller; the chosen buffer
 * depends on which layout section fully contains the range.
 */
int
pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw_code_ptr,
			void *fw_data_ptr, void *fw_core_code_ptr, void *fw_core_data_ptr,
			void **host_addr_out)
{
	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
	u32 end_addr = addr + size;

	/* Ensure requested range is not zero, and size is not causing addr to overflow.
*/ 236 if (end_addr <= addr) 237 return -EINVAL; 238 239 for (int entry = 0; entry < num_layout_entries; entry++) { 240 u32 entry_start_addr = layout_entries[entry].base_addr; 241 u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size; 242 243 if (addr >= entry_start_addr && addr < entry_end_addr && 244 end_addr > entry_start_addr && end_addr <= entry_end_addr) { 245 switch (layout_entries[entry].type) { 246 case FW_CODE: 247 *host_addr_out = fw_code_ptr; 248 break; 249 250 case FW_DATA: 251 *host_addr_out = fw_data_ptr; 252 break; 253 254 case FW_COREMEM_CODE: 255 *host_addr_out = fw_core_code_ptr; 256 break; 257 258 case FW_COREMEM_DATA: 259 *host_addr_out = fw_core_data_ptr; 260 break; 261 262 default: 263 return -EINVAL; 264 } 265 /* Direct Mem write to mapped memory */ 266 addr -= layout_entries[entry].base_addr; 267 addr += layout_entries[entry].alloc_offset; 268 269 /* 270 * Add offset to pointer to FW allocation only if that 271 * allocation is available 272 */ 273 *(u8 **)host_addr_out += addr; 274 return 0; 275 } 276 } 277 278 return -EINVAL; 279 } 280 281 static int 282 pvr_fw_create_fwif_connection_ctl(struct pvr_device *pvr_dev) 283 { 284 struct drm_device *drm_dev = from_pvr_device(pvr_dev); 285 struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; 286 287 fw_dev->fwif_connection_ctl = 288 pvr_fw_object_create_and_map_offset(pvr_dev, 289 fw_dev->fw_heap_info.config_offset + 290 PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET, 291 sizeof(*fw_dev->fwif_connection_ctl), 292 PVR_BO_FW_FLAGS_DEVICE_UNCACHED, 293 NULL, NULL, 294 &fw_dev->mem.fwif_connection_ctl_obj); 295 if (IS_ERR(fw_dev->fwif_connection_ctl)) { 296 drm_err(drm_dev, 297 "Unable to allocate FWIF connection control memory\n"); 298 return PTR_ERR(fw_dev->fwif_connection_ctl); 299 } 300 301 return 0; 302 } 303 304 static void 305 pvr_fw_fini_fwif_connection_ctl(struct pvr_device *pvr_dev) 306 { 307 struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; 308 309 
	pvr_fw_object_unmap_and_destroy(fw_dev->mem.fwif_connection_ctl_obj);
}

/*
 * Init callback: populate the per-OS FWIF init structure with the FW
 * addresses of the kernel/firmware CCBs and related FW objects.
 */
static void
fw_osinit_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_osinit *fwif_osinit = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;

	fwif_osinit->kernel_ccbctl_fw_addr = pvr_dev->kccb.ccb.ctrl_fw_addr;
	fwif_osinit->kernel_ccb_fw_addr = pvr_dev->kccb.ccb.ccb_fw_addr;
	pvr_fw_object_get_fw_addr(pvr_dev->kccb.rtn_obj,
				  &fwif_osinit->kernel_ccb_rtn_slots_fw_addr);

	fwif_osinit->firmware_ccbctl_fw_addr = pvr_dev->fwccb.ctrl_fw_addr;
	fwif_osinit->firmware_ccb_fw_addr = pvr_dev->fwccb.ccb_fw_addr;

	/* Workload estimation CCBs are not used by this driver. */
	fwif_osinit->work_est_firmware_ccbctl_fw_addr = 0;
	fwif_osinit->work_est_firmware_ccb_fw_addr = 0;

	pvr_fw_object_get_fw_addr(fw_mem->hwrinfobuf_obj,
				  &fwif_osinit->rogue_fwif_hwr_info_buf_ctl_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->osdata_obj, &fwif_osinit->fw_os_data_fw_addr);

	fwif_osinit->hwr_debug_dump_limit = 0;

	rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.hw_bvnc);
	rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.fw_bvnc);
}

/* Init callback: record the FW address of the power sync object in OSDATA. */
static void
fw_osdata_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_osdata *fwif_osdata = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	pvr_fw_object_get_fw_addr(fw_mem->power_sync_obj, &fwif_osdata->power_sync_fw_addr);
}

/* Init callback: fill the fault page with a recognisable poison pattern. */
static void
fw_fault_page_init(void *cpu_ptr, void *priv)
{
	u32 *fault_page = cpu_ptr;

	for (int i = 0; i < PVR_ROGUE_FAULT_PAGE_SIZE / sizeof(*fault_page); i++)
		fault_page[i] = 0xdeadbee0;
}

/* Init callback: populate the system-wide FWIF init structure. */
static void
fw_sysinit_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_sysinit *fwif_sysinit = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
	dma_addr_t fault_dma_addr = 0;
	u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);

	WARN_ON(!clock_speed_hz);

	WARN_ON(pvr_fw_object_get_dma_addr(fw_mem->fault_page_obj, 0, &fault_dma_addr));
	fwif_sysinit->fault_phys_addr = (u64)fault_dma_addr;

	fwif_sysinit->pds_exec_base = ROGUE_PDSCODEDATA_HEAP_BASE;
	fwif_sysinit->usc_exec_base = ROGUE_USCCODE_HEAP_BASE;

	pvr_fw_object_get_fw_addr(fw_mem->runtime_cfg_obj, &fwif_sysinit->runtime_cfg_fw_addr);
	pvr_fw_object_get_fw_addr(fw_dev->fw_trace.tracebuf_ctrl_obj,
				  &fwif_sysinit->trace_buf_ctl_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->sysdata_obj, &fwif_sysinit->fw_sys_data_fw_addr);
	pvr_fw_object_get_fw_addr(fw_mem->gpu_util_fwcb_obj,
				  &fwif_sysinit->gpu_util_fw_cb_ctl_fw_addr);
	/* Core data section is optional; see pvr_fw_process(). */
	if (fw_mem->core_data_obj) {
		pvr_fw_object_get_fw_addr(fw_mem->core_data_obj,
					  &fwif_sysinit->coremem_data_store.fw_addr);
	}

	/* Currently unsupported. */
	fwif_sysinit->counter_dump_ctl.buffer_fw_addr = 0;
	fwif_sysinit->counter_dump_ctl.size_in_dwords = 0;

	/* Skip alignment checks. */
	fwif_sysinit->align_checks = 0;

	fwif_sysinit->filter_flags = 0;
	fwif_sysinit->hw_perf_filter = 0;
	fwif_sysinit->firmware_perf = FW_PERF_CONF_NONE;
	fwif_sysinit->initial_core_clock_speed = clock_speed_hz;
	fwif_sysinit->active_pm_latency_ms = 0;
	fwif_sysinit->gpio_validation_mode = ROGUE_FWIF_GPIO_VAL_OFF;
	/* Cleared here; pvr_wait_for_fw_boot() polls this flag. */
	fwif_sysinit->firmware_started = false;
	fwif_sysinit->marker_val = 1;

	memset(&fwif_sysinit->bvnc_km_feature_flags, 0,
	       sizeof(fwif_sysinit->bvnc_km_feature_flags));
}

#define ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB 4

/* Init callback: derive the initial FW config flags from GPU features. */
static void
fw_sysdata_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_sysdata *fwif_sysdata = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	u32 slc_size_in_kilobytes = 0;
	u32 config_flags = 0;

	WARN_ON(PVR_FEATURE_VALUE(pvr_dev, slc_size_in_kilobytes, &slc_size_in_kilobytes));

	if (slc_size_in_kilobytes < ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB)
		config_flags |= ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP;

	fwif_sysdata->config_flags = config_flags;
}

/* Init callback: populate the FW runtime configuration structure. */
static void
fw_runtime_cfg_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_runtime_cfg *runtime_cfg = cpu_ptr;
	struct pvr_device *pvr_dev = priv;
	u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);

	WARN_ON(!clock_speed_hz);

	runtime_cfg->core_clock_speed = clock_speed_hz;
	runtime_cfg->active_pm_latency_ms = 0;
	runtime_cfg->active_pm_latency_persistant = true;
	WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
				  &runtime_cfg->default_dusts_num_init) != 0);

	/* Keep watchdog timer disabled.
 */
	runtime_cfg->wdg_period_us = 0;
}

/* Init callback: mark the GPU as idle in the utilisation FWCB. */
static void
fw_gpu_util_fwcb_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_gpu_util_fwcb *gpu_util_fwcb = cpu_ptr;

	gpu_util_fwcb->last_word = PVR_FWIF_GPU_UTIL_STATE_IDLE;
}

/*
 * Allocate and initialise every FW interface structure, unwinding all prior
 * allocations via the goto chain on failure.
 */
static int
pvr_fw_create_structures(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
	int err;

	fw_dev->power_sync = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->power_sync),
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->power_sync_obj);
	if (IS_ERR(fw_dev->power_sync)) {
		drm_err(drm_dev, "Unable to allocate FW power_sync structure\n");
		return PTR_ERR(fw_dev->power_sync);
	}

	fw_dev->hwrinfobuf = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->hwrinfobuf),
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->hwrinfobuf_obj);
	if (IS_ERR(fw_dev->hwrinfobuf)) {
		drm_err(drm_dev,
			"Unable to allocate FW hwrinfobuf structure\n");
		err = PTR_ERR(fw_dev->hwrinfobuf);
		goto err_release_power_sync;
	}

	err = pvr_fw_object_create(pvr_dev, PVR_SYNC_OBJ_SIZE,
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   NULL, NULL, &fw_mem->mmucache_sync_obj);
	if (err) {
		drm_err(drm_dev,
			"Unable to allocate MMU cache sync object\n");
		goto err_release_hwrinfobuf;
	}

	fw_dev->fwif_sysdata = pvr_fw_object_create_and_map(pvr_dev,
							    sizeof(*fw_dev->fwif_sysdata),
							    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							    fw_sysdata_init, pvr_dev,
							    &fw_mem->sysdata_obj);
	if (IS_ERR(fw_dev->fwif_sysdata)) {
		drm_err(drm_dev, "Unable to allocate FW SYSDATA structure\n");
		err = PTR_ERR(fw_dev->fwif_sysdata);
		goto err_release_mmucache_sync_obj;
	}

	err = pvr_fw_object_create(pvr_dev, PVR_ROGUE_FAULT_PAGE_SIZE,
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_fault_page_init, NULL, &fw_mem->fault_page_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate FW fault page\n");
		goto err_release_sysdata;
	}

	err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_gpu_util_fwcb),
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_gpu_util_fwcb_init, pvr_dev, &fw_mem->gpu_util_fwcb_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate GPU util FWCB\n");
		goto err_release_fault_page;
	}

	err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_runtime_cfg),
				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
				   fw_runtime_cfg_init, pvr_dev, &fw_mem->runtime_cfg_obj);
	if (err) {
		drm_err(drm_dev, "Unable to allocate FW runtime config\n");
		goto err_release_gpu_util_fwcb;
	}

	err = pvr_fw_trace_init(pvr_dev);
	if (err)
		goto err_release_runtime_cfg;

	fw_dev->fwif_osdata = pvr_fw_object_create_and_map(pvr_dev,
							   sizeof(*fw_dev->fwif_osdata),
							   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							   fw_osdata_init, pvr_dev,
							   &fw_mem->osdata_obj);
	if (IS_ERR(fw_dev->fwif_osdata)) {
		drm_err(drm_dev, "Unable to allocate FW OSDATA structure\n");
		err = PTR_ERR(fw_dev->fwif_osdata);
		goto err_fw_trace_fini;
	}

	/* OSINIT and SYSINIT live at fixed offsets in the config heap area. */
	fw_dev->fwif_osinit =
		pvr_fw_object_create_and_map_offset(pvr_dev,
						    fw_dev->fw_heap_info.config_offset +
						    PVR_ROGUE_FWIF_OSINIT_OFFSET,
						    sizeof(*fw_dev->fwif_osinit),
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    fw_osinit_init, pvr_dev, &fw_mem->osinit_obj);
	if (IS_ERR(fw_dev->fwif_osinit)) {
		drm_err(drm_dev, "Unable to allocate FW OSINIT structure\n");
		err = PTR_ERR(fw_dev->fwif_osinit);
		goto err_release_osdata;
	}

	fw_dev->fwif_sysinit =
		pvr_fw_object_create_and_map_offset(pvr_dev,
						    fw_dev->fw_heap_info.config_offset +
						    PVR_ROGUE_FWIF_SYSINIT_OFFSET,
						    sizeof(*fw_dev->fwif_sysinit),
						    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						    fw_sysinit_init, pvr_dev, &fw_mem->sysinit_obj);
	if (IS_ERR(fw_dev->fwif_sysinit)) {
		drm_err(drm_dev, "Unable to allocate FW SYSINIT structure\n");
		err = PTR_ERR(fw_dev->fwif_sysinit);
		goto err_release_osinit;
	}

	return 0;

err_release_osinit:
	pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);

err_release_osdata:
	pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);

err_fw_trace_fini:
	pvr_fw_trace_fini(pvr_dev);

err_release_runtime_cfg:
	pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);

err_release_gpu_util_fwcb:
	pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);

err_release_fault_page:
	pvr_fw_object_destroy(fw_mem->fault_page_obj);

err_release_sysdata:
	pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);

err_release_mmucache_sync_obj:
	pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);

err_release_hwrinfobuf:
	pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);

err_release_power_sync:
	pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);

	return err;
}

/* Tear down everything created by pvr_fw_create_structures(). */
static void
pvr_fw_destroy_structures(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	struct pvr_fw_mem *fw_mem = &fw_dev->mem;

	pvr_fw_trace_fini(pvr_dev);
	pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);
	pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);
	pvr_fw_object_destroy(fw_mem->fault_page_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->sysinit_obj);

	pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);
	pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);
}

/**
 * pvr_fw_process() - Process firmware image, allocate FW memory and create boot
 * arguments
 * @pvr_dev: Device pointer.
 *
 * Returns:
 * * 0 on success, or
 * * Any error returned by pvr_fw_object_create_and_map_offset(), or
 * * Any error returned by pvr_fw_object_create_and_map().
 */
static int
pvr_fw_process(struct pvr_device *pvr_dev)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
	const u8 *fw = pvr_dev->fw_dev.firmware->data;
	const struct pvr_fw_layout_entry *private_data;
	u8 *fw_code_ptr;
	u8 *fw_data_ptr;
	u8 *fw_core_code_ptr;
	u8 *fw_core_data_ptr;
	int err;

	layout_get_sizes(pvr_dev);

	private_data = pvr_fw_find_private_data(pvr_dev);
	if (!private_data)
		return -EINVAL;

	/* Allocate and map memory for firmware sections. */

	/*
	 * Code allocation must be at the start of the firmware heap, otherwise
	 * firmware processor will be unable to boot.
	 *
	 * This has the useful side-effect that for every other object in the
	 * driver, a firmware address of 0 is invalid.
	 */
	fw_code_ptr = pvr_fw_object_create_and_map_offset(pvr_dev, 0, fw_mem->code_alloc_size,
							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							  NULL, NULL, &fw_mem->code_obj);
	if (IS_ERR(fw_code_ptr)) {
		drm_err(drm_dev, "Unable to allocate FW code memory\n");
		return PTR_ERR(fw_code_ptr);
	}

	/* Some FW processors require the data section at a fixed heap offset. */
	if (pvr_dev->fw_dev.defs->has_fixed_data_addr) {
		u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;

		fw_data_ptr =
			pvr_fw_object_create_and_map_offset(pvr_dev, base_addr,
							    fw_mem->data_alloc_size,
							    PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							    NULL, NULL, &fw_mem->data_obj);
	} else {
		fw_data_ptr = pvr_fw_object_create_and_map(pvr_dev, fw_mem->data_alloc_size,
							   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							   NULL, NULL, &fw_mem->data_obj);
	}
	if (IS_ERR(fw_data_ptr)) {
		drm_err(drm_dev, "Unable to allocate FW data memory\n");
		err = PTR_ERR(fw_data_ptr);
		goto err_free_fw_code_obj;
	}

	/* Core code and data sections are optional. */
	if (fw_mem->core_code_alloc_size) {
		fw_core_code_ptr =
			pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_code_alloc_size,
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						     NULL, NULL, &fw_mem->core_code_obj);
		if (IS_ERR(fw_core_code_ptr)) {
			drm_err(drm_dev,
				"Unable to allocate FW core code memory\n");
			err = PTR_ERR(fw_core_code_ptr);
			goto err_free_fw_data_obj;
		}
	} else {
		fw_core_code_ptr = NULL;
	}

	if (fw_mem->core_data_alloc_size) {
		fw_core_data_ptr =
			pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_data_alloc_size,
						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						     NULL, NULL, &fw_mem->core_data_obj);
		if (IS_ERR(fw_core_data_ptr)) {
			drm_err(drm_dev,
				"Unable to allocate FW core data memory\n");
			err = PTR_ERR(fw_core_data_ptr);
			goto err_free_fw_core_code_obj;
		}
	} else {
		fw_core_data_ptr = NULL;
	}

	/* CPU-side copies are kept; pvr_fw_reinit_code_data() re-uploads from them. */
	fw_mem->code = kzalloc(fw_mem->code_alloc_size, GFP_KERNEL);
	fw_mem->data = kzalloc(fw_mem->data_alloc_size, GFP_KERNEL);
	if (fw_mem->core_code_alloc_size)
		fw_mem->core_code = kzalloc(fw_mem->core_code_alloc_size, GFP_KERNEL);
	if (fw_mem->core_data_alloc_size)
		fw_mem->core_data = kzalloc(fw_mem->core_data_alloc_size, GFP_KERNEL);

	if (!fw_mem->code || !fw_mem->data ||
	    (!fw_mem->core_code && fw_mem->core_code_alloc_size) ||
	    (!fw_mem->core_data && fw_mem->core_data_alloc_size)) {
		err = -ENOMEM;
		goto err_free_kdata;
	}

	/* Processor-specific unpacking of the image into the CPU-side buffers. */
	err = pvr_dev->fw_dev.defs->fw_process(pvr_dev, fw,
					       fw_mem->code, fw_mem->data, fw_mem->core_code,
					       fw_mem->core_data, fw_mem->core_code_alloc_size);

	if (err)
		goto err_free_kdata;

	memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
	memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
	if (fw_mem->core_code)
		memcpy(fw_core_code_ptr, fw_mem->core_code, fw_mem->core_code_alloc_size);
	if (fw_mem->core_data)
		memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);

	/* We're finished with the firmware section memory on the CPU, unmap. */
	if (fw_core_data_ptr) {
		pvr_fw_object_vunmap(fw_mem->core_data_obj);
		fw_core_data_ptr = NULL;
	}
	if (fw_core_code_ptr) {
		pvr_fw_object_vunmap(fw_mem->core_code_obj);
		fw_core_code_ptr = NULL;
	}
	pvr_fw_object_vunmap(fw_mem->data_obj);
	fw_data_ptr = NULL;
	pvr_fw_object_vunmap(fw_mem->code_obj);
	fw_code_ptr = NULL;

	err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
	if (err)
		goto err_free_kdata;

	return 0;

err_free_kdata:
	/* NOTE: intentionally falls through the labels below to release all sections. */
	kfree(fw_mem->core_data);
	kfree(fw_mem->core_code);
	kfree(fw_mem->data);
	kfree(fw_mem->code);

	if (fw_core_data_ptr)
		pvr_fw_object_vunmap(fw_mem->core_data_obj);
	if (fw_mem->core_data_obj)
		pvr_fw_object_destroy(fw_mem->core_data_obj);

err_free_fw_core_code_obj:
	if (fw_core_code_ptr)
		pvr_fw_object_vunmap(fw_mem->core_code_obj);
	if (fw_mem->core_code_obj)
		pvr_fw_object_destroy(fw_mem->core_code_obj);

err_free_fw_data_obj:
	if (fw_data_ptr)
		pvr_fw_object_vunmap(fw_mem->data_obj);
	pvr_fw_object_destroy(fw_mem->data_obj);

err_free_fw_code_obj:
	if (fw_code_ptr)
		pvr_fw_object_vunmap(fw_mem->code_obj);
	pvr_fw_object_destroy(fw_mem->code_obj);

	return err;
}

/* Copy a CPU-side buffer into a FW object, mapping it temporarily. */
static int
pvr_copy_to_fw(struct pvr_fw_object *dest_obj, u8 *src_ptr, u32 size)
{
	u8 *dest_ptr = pvr_fw_object_vmap(dest_obj);

	if (IS_ERR(dest_ptr))
		return PTR_ERR(dest_ptr);

	memcpy(dest_ptr, src_ptr, size);

	pvr_fw_object_vunmap(dest_obj);

	return 0;
}

/* Re-upload FW code/data sections from the CPU-side copies kept by pvr_fw_process(). */
static int
pvr_fw_reinit_code_data(struct pvr_device *pvr_dev)
{
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
	int err;

	err = pvr_copy_to_fw(fw_mem->code_obj, fw_mem->code, fw_mem->code_alloc_size);
	if (err)
		return err;

	err =
	      pvr_copy_to_fw(fw_mem->data_obj, fw_mem->data, fw_mem->data_alloc_size);
	if (err)
		return err;

	/* Core sections are optional; only re-upload those that were allocated. */
	if (fw_mem->core_code) {
		err = pvr_copy_to_fw(fw_mem->core_code_obj, fw_mem->core_code,
				     fw_mem->core_code_alloc_size);
		if (err)
			return err;
	}

	if (fw_mem->core_data) {
		err = pvr_copy_to_fw(fw_mem->core_data_obj, fw_mem->core_data,
				     fw_mem->core_data_alloc_size);
		if (err)
			return err;
	}

	return 0;
}

/* Release all FW section memory and CPU-side copies created by pvr_fw_process(). */
static void
pvr_fw_cleanup(struct pvr_device *pvr_dev)
{
	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;

	pvr_fw_fini_fwif_connection_ctl(pvr_dev);

	kfree(fw_mem->core_data);
	kfree(fw_mem->core_code);
	kfree(fw_mem->data);
	kfree(fw_mem->code);

	if (fw_mem->core_code_obj)
		pvr_fw_object_destroy(fw_mem->core_code_obj);
	if (fw_mem->core_data_obj)
		pvr_fw_object_destroy(fw_mem->core_data_obj);
	pvr_fw_object_destroy(fw_mem->code_obj);
	pvr_fw_object_destroy(fw_mem->data_obj);
}

/**
 * pvr_wait_for_fw_boot() - Wait for firmware to finish booting
 * @pvr_dev: Target PowerVR device.
 *
 * Returns:
 * * 0 on success, or
 * * -%ETIMEDOUT if firmware fails to boot within timeout.
 */
int
pvr_wait_for_fw_boot(struct pvr_device *pvr_dev)
{
	ktime_t deadline = ktime_add_us(ktime_get(), FW_BOOT_TIMEOUT_USEC);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	/* Busy-poll the firmware_started flag (set via SYSINIT) until the deadline. */
	while (ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) {
		if (READ_ONCE(fw_dev->fwif_sysinit->firmware_started))
			return 0;
	}

	return -ETIMEDOUT;
}

/**
 * pvr_fw_heap_info_init() - Calculate size and masks for FW heap
 * @pvr_dev: Target PowerVR device.
 * @log2_size: Log2 of raw heap size.
 * @reserved_size: Size of reserved area of heap, in bytes. May be zero.
 */
void
pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->fw_heap_info.gpu_addr = PVR_ROGUE_FW_MAIN_HEAP_BASE;
	fw_dev->fw_heap_info.log2_size = log2_size;
	fw_dev->fw_heap_info.reserved_size = reserved_size;
	fw_dev->fw_heap_info.raw_size = 1 << fw_dev->fw_heap_info.log2_size;
	fw_dev->fw_heap_info.offset_mask = fw_dev->fw_heap_info.raw_size - 1;
	/* Config area occupies the top PVR_ROGUE_FW_CONFIG_HEAP_SIZE bytes. */
	fw_dev->fw_heap_info.config_offset = fw_dev->fw_heap_info.raw_size -
					     PVR_ROGUE_FW_CONFIG_HEAP_SIZE;
	fw_dev->fw_heap_info.size = fw_dev->fw_heap_info.raw_size -
				    (PVR_ROGUE_FW_CONFIG_HEAP_SIZE + reserved_size);
}

/**
 * pvr_fw_validate_init_device_info() - Validate firmware and initialise device information
 * @pvr_dev: Target PowerVR device.
 *
 * This function must be called before querying device information.
 *
 * Returns:
 * * 0 on success, or
 * * -%EINVAL if firmware validation fails.
 */
int
pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
{
	int err;

	err = pvr_fw_validate(pvr_dev);
	if (err)
		return err;

	return pvr_fw_get_device_info(pvr_dev);
}

/**
 * pvr_fw_init() - Initialise and boot firmware
 * @pvr_dev: Target PowerVR device
 *
 * On successful completion of the function the PowerVR device will be
 * initialised and ready to use.
 *
 * Returns:
 * * 0 on success,
 * * -%EINVAL on invalid firmware image,
 * * -%ENOMEM on out of memory, or
 * * -%ETIMEDOUT if firmware processor fails to boot or on register poll timeout.
 */
int
pvr_fw_init(struct pvr_device *pvr_dev)
{
	/* Dispatch table mapping FW processor type to its ops. */
	static const struct pvr_fw_defs *fw_defs[PVR_FW_PROCESSOR_TYPE_COUNT] = {
		[PVR_FW_PROCESSOR_TYPE_META] = &pvr_fw_defs_meta,
		[PVR_FW_PROCESSOR_TYPE_MIPS] = &pvr_fw_defs_mips,
		[PVR_FW_PROCESSOR_TYPE_RISCV] = &pvr_fw_defs_riscv,
	};

	u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
	u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
	int err;

	if (fw_dev->processor_type >= PVR_FW_PROCESSOR_TYPE_COUNT)
		return -EINVAL;

	fw_dev->defs = fw_defs[fw_dev->processor_type];

	err = fw_dev->defs->init(pvr_dev);
	if (err)
		return err;

	drm_mm_init(&fw_dev->fw_mm, ROGUE_FW_HEAP_BASE, fw_dev->fw_heap_info.raw_size);
	fw_dev->fw_mm_base = ROGUE_FW_HEAP_BASE;
	spin_lock_init(&fw_dev->fw_mm_lock);

	INIT_LIST_HEAD(&fw_dev->fw_objs.list);
	err = drmm_mutex_init(from_pvr_device(pvr_dev), &fw_dev->fw_objs.lock);
	if (err)
		goto err_mm_takedown;

	err = pvr_fw_process(pvr_dev);
	if (err)
		goto err_mm_takedown;

	/* Initialise KCCB and FWCCB. */
	err = pvr_kccb_init(pvr_dev);
	if (err)
		goto err_fw_cleanup;

	err = pvr_fwccb_init(pvr_dev);
	if (err)
		goto err_kccb_fini;

	/* Allocate memory for KCCB return slots. */
	pvr_dev->kccb.rtn = pvr_fw_object_create_and_map(pvr_dev, kccb_rtn_size,
							 PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
							 NULL, NULL, &pvr_dev->kccb.rtn_obj);
	if (IS_ERR(pvr_dev->kccb.rtn)) {
		err = PTR_ERR(pvr_dev->kccb.rtn);
		goto err_fwccb_fini;
	}

	err = pvr_fw_create_structures(pvr_dev);
	if (err)
		goto err_kccb_rtn_release;

	err = pvr_fw_start(pvr_dev);
	if (err)
		goto err_destroy_structures;

	err = pvr_wait_for_fw_boot(pvr_dev);
	if (err) {
		drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
		goto err_fw_stop;
	}

	fw_dev->booted = true;

	return 0;

err_fw_stop:
	pvr_fw_stop(pvr_dev);

err_destroy_structures:
	pvr_fw_destroy_structures(pvr_dev);

err_kccb_rtn_release:
	pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);

err_fwccb_fini:
	pvr_ccb_fini(&pvr_dev->fwccb);

err_kccb_fini:
	pvr_kccb_fini(pvr_dev);

err_fw_cleanup:
	pvr_fw_cleanup(pvr_dev);

err_mm_takedown:
	drm_mm_takedown(&fw_dev->fw_mm);

	if (fw_dev->defs->fini)
		fw_dev->defs->fini(pvr_dev);

	return err;
}

/**
 * pvr_fw_fini() - Shutdown firmware processor and free associated memory
 * @pvr_dev: Target PowerVR device
 */
void
pvr_fw_fini(struct pvr_device *pvr_dev)
{
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	fw_dev->booted = false;

	pvr_fw_destroy_structures(pvr_dev);
	pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);

	/*
	 * Ensure FWCCB worker has finished executing before destroying FWCCB. The IRQ handler has
	 * been unregistered at this point so no new work should be being submitted.
	 */
	pvr_ccb_fini(&pvr_dev->fwccb);
	pvr_kccb_fini(pvr_dev);
	pvr_fw_cleanup(pvr_dev);

	/* All FW objects should have been released by now. */
	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	WARN_ON(!list_empty(&pvr_dev->fw_dev.fw_objs.list));
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	drm_mm_takedown(&fw_dev->fw_mm);

	if (fw_dev->defs->fini)
		fw_dev->defs->fini(pvr_dev);
}

/**
 * pvr_fw_mts_schedule() - Schedule work via an MTS kick
 * @pvr_dev: Target PowerVR device
 * @val: Kick mask. Should be a combination of %ROGUE_CR_MTS_SCHEDULE_*
 */
void
pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val)
{
	/* Ensure memory is flushed before kicking MTS. */
	wmb();

	pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_SCHEDULE, val);

	/* Ensure the MTS kick goes through before continuing. */
	mb();
}

/**
 * pvr_fw_structure_cleanup() - Send FW cleanup request for an object
 * @pvr_dev: Target PowerVR device.
 * @type: Type of object to cleanup. Must be one of &enum rogue_fwif_cleanup_type.
 * @fw_obj: Pointer to FW object containing object to cleanup.
 * @offset: Offset within FW object of object to cleanup.
 *
 * Returns:
 * * 0 on success,
 * * -EBUSY if object is busy,
 * * -ETIMEDOUT on timeout, or
 * * -EIO if device is lost.
 */
int
pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj,
			 u32 offset)
{
	struct rogue_fwif_kccb_cmd cmd;
	int slot_nr;
	int idx;
	int err;
	u32 rtn;

	struct rogue_fwif_cleanup_request *cleanup_req = &cmd.cmd_data.cleanup_data;

	/* Hold off device resets while the cleanup command is in flight. */
	down_read(&pvr_dev->reset_sem);

	if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) {
		/* Device has been unplugged/lost; there is no FW to talk to. */
		err = -EIO;
		goto err_up_read;
	}

	cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_CLEANUP;
	cmd.kccb_flags = 0;
	cleanup_req->cleanup_type = type;

	/* Which union member receives the FW address depends on @type. */
	switch (type) {
	case ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.context_fw_addr);
		break;
	case ROGUE_FWIF_CLEANUP_HWRTDATA:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.hwrt_data_fw_addr);
		break;
	case ROGUE_FWIF_CLEANUP_FREELIST:
		pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
						 &cleanup_req->cleanup_data.freelist_fw_addr);
		break;
	default:
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot_nr);
	if (err)
		goto err_drm_dev_exit;

	/* Wait up to one second (HZ jiffies) for the FW to acknowledge. */
	err = pvr_kccb_wait_for_completion(pvr_dev, slot_nr, HZ, &rtn);
	if (err)
		goto err_drm_dev_exit;

	if (rtn & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)
		err = -EBUSY;

err_drm_dev_exit:
	drm_dev_exit(idx);

err_up_read:
	up_read(&pvr_dev->reset_sem);

	return err;
}

/**
 * pvr_fw_object_fw_map() - Map a FW object in firmware address space
 * @pvr_dev: Device pointer.
 * @fw_obj: FW object to map.
 * @dev_addr: Desired address in device space, if a specific address is
 *            required. 0 otherwise.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%EINVAL if @fw_obj is already mapped but has no references, or
 *  * Any error returned by DRM.
 */
static int
pvr_fw_object_fw_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj, u64 dev_addr)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	int err;

	spin_lock(&fw_dev->fw_mm_lock);

	/* An allocated mm node means the object is already mapped. */
	if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		err = -EINVAL;
		goto err_unlock;
	}

	if (!dev_addr) {
		/*
		 * Allocate from the main heap only (firmware heap minus
		 * config space).
		 */
		err = drm_mm_insert_node_in_range(&fw_dev->fw_mm, &fw_obj->fw_mm_node,
						  gem_obj->size, 0, 0,
						  fw_dev->fw_heap_info.gpu_addr,
						  fw_dev->fw_heap_info.gpu_addr +
						  fw_dev->fw_heap_info.size, 0);
		if (err)
			goto err_unlock;
	} else {
		/* Caller requested a fixed address - reserve exactly that range. */
		fw_obj->fw_mm_node.start = dev_addr;
		fw_obj->fw_mm_node.size = gem_obj->size;
		err = drm_mm_reserve_node(&fw_dev->fw_mm, &fw_obj->fw_mm_node);
		if (err)
			goto err_unlock;
	}

	spin_unlock(&fw_dev->fw_mm_lock);

	/* Map object on GPU. */
	err = fw_dev->defs->vm_map(pvr_dev, fw_obj);
	if (err)
		goto err_remove_node;

	/* Offset from the FW heap base; this is what the FW addresses with. */
	fw_obj->fw_addr_offset = (u32)(fw_obj->fw_mm_node.start - fw_dev->fw_mm_base);

	return 0;

err_remove_node:
	spin_lock(&fw_dev->fw_mm_lock);
	drm_mm_remove_node(&fw_obj->fw_mm_node);

err_unlock:
	spin_unlock(&fw_dev->fw_mm_lock);

	return err;
}

/**
 * pvr_fw_object_fw_unmap() - Unmap a previously mapped FW object
 * @fw_obj: FW object to unmap.
 *
 * Returns:
 *  * 0 on success, or
 *  * -%EINVAL if object is not currently mapped.
 */
static int
pvr_fw_object_fw_unmap(struct pvr_fw_object *fw_obj)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	/* Unmap from the GPU first, then release the FW address range. */
	fw_dev->defs->vm_unmap(pvr_dev, fw_obj);

	spin_lock(&fw_dev->fw_mm_lock);

	if (!drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		spin_unlock(&fw_dev->fw_mm_lock);
		return -EINVAL;
	}

	drm_mm_remove_node(&fw_obj->fw_mm_node);

	spin_unlock(&fw_dev->fw_mm_lock);

	return 0;
}

/*
 * Common helper backing pvr_fw_object_create*(): allocates the FW object,
 * maps it into FW space (at @dev_addr if non-zero), maps it for the CPU, runs
 * the optional @init callback and registers the object on the device-wide FW
 * object list. Returns the CPU mapping, or an ERR_PTR on failure.
 */
static void *
pvr_fw_object_create_and_map_common(struct pvr_device *pvr_dev, size_t size,
				    u64 flags, u64 dev_addr,
				    void (*init)(void *cpu_ptr, void *priv),
				    void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	struct pvr_fw_object *fw_obj;
	void *cpu_ptr;
	int err;

	/* %DRM_PVR_BO_PM_FW_PROTECT is implicit for FW objects.
	 */
	flags |= DRM_PVR_BO_PM_FW_PROTECT;

	fw_obj = kzalloc(sizeof(*fw_obj), GFP_KERNEL);
	if (!fw_obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fw_obj->node);
	/* Stash the init callback so pvr_fw_hard_reset() can re-run it. */
	fw_obj->init = init;
	fw_obj->init_priv = init_priv;

	fw_obj->gem = pvr_gem_object_create(pvr_dev, size, flags);
	if (IS_ERR(fw_obj->gem)) {
		err = PTR_ERR(fw_obj->gem);
		fw_obj->gem = NULL;
		goto err_put_object;
	}

	err = pvr_fw_object_fw_map(pvr_dev, fw_obj, dev_addr);
	if (err)
		goto err_put_object;

	cpu_ptr = pvr_fw_object_vmap(fw_obj);
	if (IS_ERR(cpu_ptr)) {
		err = PTR_ERR(cpu_ptr);
		goto err_put_object;
	}

	*fw_obj_out = fw_obj;

	if (fw_obj->init)
		fw_obj->init(cpu_ptr, fw_obj->init_priv);

	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	list_add_tail(&fw_obj->node, &pvr_dev->fw_dev.fw_objs.list);
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	return cpu_ptr;

err_put_object:
	/* Destroy handles partially-constructed objects (NULL gem, no map). */
	pvr_fw_object_destroy(fw_obj);

	return ERR_PTR(err);
}

/**
 * pvr_fw_object_create() - Create a FW object and map to firmware
 * @pvr_dev: PowerVR device pointer.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_object_create_and_map_common().
1335 */ 1336 int 1337 pvr_fw_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags, 1338 void (*init)(void *cpu_ptr, void *priv), void *init_priv, 1339 struct pvr_fw_object **fw_obj_out) 1340 { 1341 void *cpu_ptr; 1342 1343 cpu_ptr = pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv, 1344 fw_obj_out); 1345 if (IS_ERR(cpu_ptr)) 1346 return PTR_ERR(cpu_ptr); 1347 1348 pvr_fw_object_vunmap(*fw_obj_out); 1349 1350 return 0; 1351 } 1352 1353 /** 1354 * pvr_fw_object_create_and_map() - Create a FW object and map to firmware and CPU 1355 * @pvr_dev: PowerVR device pointer. 1356 * @size: Size of object, in bytes. 1357 * @flags: Options which affect both this operation and future mapping 1358 * operations performed on the returned object. Must be a combination of 1359 * DRM_PVR_BO_* and/or PVR_BO_* flags. 1360 * @init: Initialisation callback. 1361 * @init_priv: Private pointer to pass to initialisation callback. 1362 * @fw_obj_out: Pointer to location to store created object pointer. 1363 * 1364 * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently, 1365 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS 1366 * set. 1367 * 1368 * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU 1369 * mapping. 1370 * 1371 * Returns: 1372 * * Pointer to CPU mapping of newly created object, or 1373 * * Any error returned by pvr_fw_object_create(), or 1374 * * Any error returned by pvr_fw_object_vmap(). 1375 */ 1376 void * 1377 pvr_fw_object_create_and_map(struct pvr_device *pvr_dev, size_t size, u64 flags, 1378 void (*init)(void *cpu_ptr, void *priv), 1379 void *init_priv, struct pvr_fw_object **fw_obj_out) 1380 { 1381 return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv, 1382 fw_obj_out); 1383 } 1384 1385 /** 1386 * pvr_fw_object_create_and_map_offset() - Create a FW object and map to 1387 * firmware at the provided offset and to the CPU. 
 * @pvr_dev: PowerVR device pointer.
 * @dev_offset: Base address of desired FW mapping, offset from start of FW heap.
 * @size: Size of object, in bytes.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 * @init: Initialisation callback.
 * @init_priv: Private pointer to pass to initialisation callback.
 * @fw_obj_out: Pointer to location to store created object pointer.
 *
 * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
 * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
 * set.
 *
 * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
 * mapping.
 *
 * Returns:
 *  * Pointer to CPU mapping of newly created object, or
 *  * Any error returned by pvr_fw_object_create(), or
 *  * Any error returned by pvr_fw_object_vmap().
 */
void *
pvr_fw_object_create_and_map_offset(struct pvr_device *pvr_dev,
				    u32 dev_offset, size_t size, u64 flags,
				    void (*init)(void *cpu_ptr, void *priv),
				    void *init_priv, struct pvr_fw_object **fw_obj_out)
{
	/* Translate the heap-relative offset into an absolute FW address. */
	u64 dev_addr = pvr_dev->fw_dev.fw_mm_base + dev_offset;

	return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, dev_addr, init, init_priv,
						   fw_obj_out);
}

/**
 * pvr_fw_object_destroy() - Destroy a pvr_fw_object
 * @fw_obj: Pointer to object to destroy.
 */
void pvr_fw_object_destroy(struct pvr_fw_object *fw_obj)
{
	/*
	 * NOTE(review): this is reached from the error path of
	 * pvr_fw_object_create_and_map_common() with fw_obj->gem == NULL, in
	 * which case the gem_obj->dev dereference below looks unsafe - verify
	 * whether that path is actually reachable with a NULL gem.
	 */
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);

	/* Drop the object from the device-wide FW object list first. */
	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
	list_del(&fw_obj->node);
	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);

	if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
		/* If we can't unmap, leak the memory. */
		if (WARN_ON(pvr_fw_object_fw_unmap(fw_obj)))
			return;
	}

	if (fw_obj->gem)
		pvr_gem_object_put(fw_obj->gem);

	kfree(fw_obj);
}

/**
 * pvr_fw_object_get_fw_addr_offset() - Return address of object in firmware address space, with
 * given offset.
 * @fw_obj: Pointer to object.
 * @offset: Desired offset from start of object.
 * @fw_addr_out: Location to store address to.
 */
void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, u32 *fw_addr_out)
{
	struct pvr_gem_object *pvr_obj = fw_obj->gem;
	struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(pvr_obj)->dev);

	/* Address translation is FW-processor specific; delegate to defs. */
	*fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
}

/**
 * pvr_fw_obj_get_gpu_addr() - Return GPU address of a FW object
 * @fw_obj: Pointer to object.
 *
 * Returns:
 *  * GPU address of @fw_obj within the firmware heap.
 */
u64
pvr_fw_obj_get_gpu_addr(struct pvr_fw_object *fw_obj)
{
	struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;

	return fw_dev->fw_heap_info.gpu_addr + fw_obj->fw_addr_offset;
}

/**
 * pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
 * structures
 * @pvr_dev: Device pointer
 *
 * If this function returns an error then the caller must regard the device as lost.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by pvr_fw_reinit_code_data().
1482 */ 1483 int 1484 pvr_fw_hard_reset(struct pvr_device *pvr_dev) 1485 { 1486 struct list_head *pos; 1487 int err; 1488 1489 /* Reset all FW objects */ 1490 mutex_lock(&pvr_dev->fw_dev.fw_objs.lock); 1491 1492 list_for_each(pos, &pvr_dev->fw_dev.fw_objs.list) { 1493 struct pvr_fw_object *fw_obj = container_of(pos, struct pvr_fw_object, node); 1494 void *cpu_ptr = pvr_fw_object_vmap(fw_obj); 1495 1496 WARN_ON(IS_ERR(cpu_ptr)); 1497 1498 if (!(fw_obj->gem->flags & PVR_BO_FW_NO_CLEAR_ON_RESET)) { 1499 memset(cpu_ptr, 0, pvr_gem_object_size(fw_obj->gem)); 1500 1501 if (fw_obj->init) 1502 fw_obj->init(cpu_ptr, fw_obj->init_priv); 1503 } 1504 1505 pvr_fw_object_vunmap(fw_obj); 1506 } 1507 1508 mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock); 1509 1510 err = pvr_fw_reinit_code_data(pvr_dev); 1511 if (err) 1512 return err; 1513 1514 return 0; 1515 } 1516