/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/firmware.h>
#include <core/memory.h>
#include <subdev/mmu.h>
#include <subdev/gsp.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>
#include <engine/nvdec.h>

static struct nvkm_acr_hsfw *
nvkm_acr_hsfw_find(struct nvkm_acr *acr, const char *name)
{
	struct nvkm_acr_hsfw *hsfw;

	list_for_each_entry(hsfw, &acr->hsfw, head) {
		if (!strcmp(hsfw->fw.fw.name, name))
			return hsfw;
	}

	return NULL;
}

int
nvkm_acr_hsfw_boot(struct nvkm_acr *acr, const char *name)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct nvkm_acr_hsfw *hsfw;

	hsfw = nvkm_acr_hsfw_find(acr, name);
	if (!hsfw)
		return -EINVAL;

	return nvkm_falcon_fw_boot(&hsfw->fw, subdev, true, NULL, NULL,
				   hsfw->boot_mbox0, hsfw->intr_clear);
}

static struct nvkm_acr_lsf *
nvkm_acr_rtos(struct nvkm_acr *acr)
{
	struct nvkm_acr_lsf *lsf;

	if (acr) {
		list_for_each_entry(lsf, &acr->lsf, head) {
			if (lsf->func->bootstrap_falcon)
				return lsf;
		}
	}

	return NULL;
}

static void
nvkm_acr_unload(struct nvkm_acr *acr)
{
	if (acr->done) {
		if (acr->rtos) {
			nvkm_subdev_unref(acr->rtos->falcon->owner);
			acr->rtos = NULL;
		}

		nvkm_acr_hsfw_boot(acr, "unload");
		acr->done = false;
	}
}

static int
nvkm_acr_load(struct nvkm_acr *acr)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct nvkm_acr_lsf *rtos = nvkm_acr_rtos(acr);
	u64 start, limit;
	int ret;

	if (list_empty(&acr->lsf)) {
		nvkm_debug(subdev, "No LSF(s) present.\n");
		return 0;
	}

	ret = acr->func->init(acr);
	if (ret)
		return ret;

	acr->func->wpr_check(acr, &start, &limit);

	if (start != acr->wpr_start || limit != acr->wpr_end) {
		nvkm_error(subdev, "WPR not configured as expected: "
				   "%016llx-%016llx vs %016llx-%016llx\n",
			   acr->wpr_start, acr->wpr_end, start, limit);
		return -EIO;
	}

	acr->done = true;

	if (rtos) {
		ret = nvkm_subdev_ref(rtos->falcon->owner);
		if (ret)
			return ret;

		acr->rtos = rtos;
	}

	return ret;
}

static int
nvkm_acr_reload(struct nvkm_acr *acr)
{
	nvkm_acr_unload(acr);
	return nvkm_acr_load(acr);
}
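/* Example (a minimal sketch, not code from this file): a caller that needs
 * an LS falcon re-bootstrapped after a reset would typically pass a
 * single-bit mask built from enum nvkm_acr_lsf_id, e.g.:
 *
 *	ret = nvkm_acr_bootstrap_falcons(device, BIT_ULL(NVKM_ACR_LSF_PMU));
 *	if (ret)
 *		return ret;
 *
 * Multiple bits may be set in the mask; whether the falcons are handled
 * one-by-one or in a single RTOS call depends on whether the RTOS provides
 * bootstrap_multiple_falcons(), as seen below.
 */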
int
nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
{
	struct nvkm_acr *acr = device->acr;
	struct nvkm_acr_lsf *rtos = nvkm_acr_rtos(acr);
	unsigned long id;

	/* If there's no LS FW managing bootstrapping of other LS falcons,
	 * we depend on the HS firmware being able to do it instead.
	 */
	if (!rtos) {
		/* Which isn't possible everywhere... */
		if ((mask & acr->func->bootstrap_falcons) == mask) {
			int ret = nvkm_acr_reload(acr);
			if (ret)
				return ret;

			return acr->done ? 0 : -EINVAL;
		}
		return -ENOSYS;
	}

	if ((mask & rtos->func->bootstrap_falcons) != mask)
		return -ENOSYS;

	if (rtos->func->bootstrap_multiple_falcons)
		return rtos->func->bootstrap_multiple_falcons(rtos->falcon, mask);

	for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) {
		int ret = rtos->func->bootstrap_falcon(rtos->falcon, id);
		if (ret)
			return ret;
	}

	return 0;
}

bool
nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id)
{
	struct nvkm_acr *acr = device->acr;

	if (acr) {
		if (acr->managed_falcons & BIT_ULL(id))
			return true;
	}

	return false;
}

static int
nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend)
{
	if (!subdev->use.enabled)
		return 0;

	nvkm_acr_unload(nvkm_acr(subdev));
	return 0;
}

static int
nvkm_acr_init(struct nvkm_subdev *subdev)
{
	struct nvkm_acr *acr = nvkm_acr(subdev);

	if (!nvkm_acr_rtos(acr))
		return 0;

	return nvkm_acr_load(acr);
}

static void
nvkm_acr_cleanup(struct nvkm_acr *acr)
{
	nvkm_acr_lsfw_del_all(acr);

	nvkm_firmware_put(acr->wpr_fw);
	acr->wpr_fw = NULL;
}
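/* One-time setup, in outline (each step is implemented below):
 *
 *  1. Size the WPR image up-front, culling LS firmware that can't be used.
 *  2. Allocate (dGPU) or locate (Tegra) the WPR region.
 *  3. Build or copy the ucode blob into it, and patch addresses.
 *  4. Create an instance block + VMM and map the HS firmware into it.
 */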
static int
nvkm_acr_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = subdev->device;
	struct nvkm_acr *acr = nvkm_acr(subdev);
	struct nvkm_acr_hsfw *hsfw;
	struct nvkm_acr_lsfw *lsfw, *lsft;
	struct nvkm_acr_lsf *lsf, *rtos;
	struct nvkm_falcon *falcon;
	u32 wpr_size = 0;
	u64 falcons;
	int ret, i;

	if (list_empty(&acr->hsfw) || !acr->func || !acr->func->wpr_layout) {
		nvkm_debug(subdev, "No HSFW(s)\n");
		nvkm_acr_cleanup(acr);
		return 0;
	}

	/* Determine layout/size of WPR image up-front, as we need to know
	 * it to allocate memory before we begin constructing it.
	 */
	list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
		/* Cull unknown falcons that are present in WPR image. */
		if (acr->wpr_fw) {
			if (!lsfw->func) {
				nvkm_acr_lsfw_del(lsfw);
				continue;
			}

			wpr_size = acr->wpr_fw->size;
		}

		/* Ensure we've fetched falcon configuration. */
		ret = nvkm_falcon_get(lsfw->falcon, subdev);
		if (ret)
			return ret;

		nvkm_falcon_put(lsfw->falcon, subdev);

		if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL)))
			return -ENOMEM;
		lsf->func = lsfw->func;
		lsf->falcon = lsfw->falcon;
		lsf->id = lsfw->id;
		list_add_tail(&lsf->head, &acr->lsf);
		acr->managed_falcons |= BIT_ULL(lsf->id);
	}

	/* Ensure the falcon that'll provide ACR functions is booted first. */
	rtos = nvkm_acr_rtos(acr);
	if (rtos) {
		falcons = rtos->func->bootstrap_falcons;
		list_move(&rtos->head, &acr->lsf);
	} else {
		falcons = acr->func->bootstrap_falcons;
	}

	/* Cull falcons that can't be bootstrapped, or the HSFW can fail to
	 * boot and leave the GPU in a weird state.
	 */
	list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
		if (!(falcons & BIT_ULL(lsfw->id))) {
			nvkm_warn(subdev, "%s falcon cannot be bootstrapped\n",
				  nvkm_acr_lsf_id(lsfw->id));
			nvkm_acr_lsfw_del(lsfw);
		}
	}

	if (!acr->wpr_fw || acr->wpr_comp)
		wpr_size = acr->func->wpr_layout(acr);

	/* Allocate/Locate WPR + fill ucode blob pointer.
	 *
	 * dGPU: allocate WPR + shadow blob
	 * Tegra: locate WPR with regs, ensure size is sufficient,
	 *        allocate ucode blob.
	 */
	ret = acr->func->wpr_alloc(acr, wpr_size);
	if (ret)
		return ret;

	nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n",
		   acr->wpr_start, acr->wpr_end, acr->shadow_start);

	/* Write WPR to ucode blob. */
	nvkm_kmap(acr->wpr);
	if (acr->wpr_fw && !acr->wpr_comp)
		nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size);

	if (!acr->wpr_fw || acr->wpr_comp)
		acr->func->wpr_build(acr, rtos);
	acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev);

	if (acr->wpr_fw && acr->wpr_comp) {
		nvkm_kmap(acr->wpr);
		for (i = 0; i < acr->wpr_fw->size; i += 4) {
			u32 us = nvkm_ro32(acr->wpr, i);
			u32 fw = ((u32 *)acr->wpr_fw->data)[i / 4];
			if (fw != us) {
				nvkm_warn(subdev, "%08x: %08x %08x\n",
					  i, us, fw);
			}
		}
		return -EINVAL;
	}
	nvkm_done(acr->wpr);

	/* Allocate instance block for ACR-related stuff. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
			      &acr->inst);
	if (ret)
		return ret;

	ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm);
	if (ret)
		return ret;

	acr->vmm->debug = acr->subdev.debug;

	ret = nvkm_vmm_join(acr->vmm, acr->inst);
	if (ret)
		return ret;

	/* Load HS firmware blobs into ACR VMM. */
	list_for_each_entry(hsfw, &acr->hsfw, head) {
		switch (hsfw->falcon_id) {
		case NVKM_ACR_HSF_PMU : falcon = &device->pmu->falcon; break;
		case NVKM_ACR_HSF_SEC2: falcon = &device->sec2->falcon; break;
		case NVKM_ACR_HSF_GSP : falcon = &device->gsp->falcon; break;
		default:
			WARN_ON(1);
			return -EINVAL;
		}

		ret = nvkm_falcon_fw_oneinit(&hsfw->fw, falcon, acr->vmm, acr->inst);
		if (ret)
			return ret;
	}

	/* Kill temporary data. */
	nvkm_acr_cleanup(acr);
	return 0;
}

static void *
nvkm_acr_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_acr *acr = nvkm_acr(subdev);
	struct nvkm_acr_hsfw *hsfw, *hsft;
	struct nvkm_acr_lsf *lsf, *lst;

	list_for_each_entry_safe(hsfw, hsft, &acr->hsfw, head) {
		nvkm_falcon_fw_dtor(&hsfw->fw);
		list_del(&hsfw->head);
		kfree(hsfw);
	}

	nvkm_vmm_part(acr->vmm, acr->inst);
	nvkm_vmm_unref(&acr->vmm);
	nvkm_memory_unref(&acr->inst);

	nvkm_memory_unref(&acr->wpr);

	list_for_each_entry_safe(lsf, lst, &acr->lsf, head) {
		list_del(&lsf->head);
		kfree(lsf);
	}

	nvkm_acr_cleanup(acr);
	return acr;
}

static const struct nvkm_subdev_func
nvkm_acr = {
	.dtor = nvkm_acr_dtor,
	.oneinit = nvkm_acr_oneinit,
	.init = nvkm_acr_init,
	.fini = nvkm_acr_fini,
};
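/* Debug aid: when the NvAcrWpr config option selects an "acr/wpr" firmware
 * image, it's loaded and parsed here so our LSF list mirrors the layout
 * NVIDIA's tooling produced.  NvAcrWprCompare then makes oneinit() diff the
 * generated image against it word-by-word (deliberately failing with
 * -EINVAL afterwards), while NvAcrWprPrevAddr supplies the base address the
 * image was originally built for, so wpr_patch() can relocate it.
 */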
static int
nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw);
	if (ret < 0)
		return ret;

	/* Pre-add LSFs in the order they appear in the FW WPR image so that
	 * we're able to do a binary comparison with our own generator.
	 */
	ret = acr->func->wpr_parse(acr);
	if (ret)
		return ret;

	acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false);
	acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0);
	return 0;
}

int
nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr)
{
	struct nvkm_acr *acr;
	long wprfw;

	if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_acr, device, type, inst, &acr->subdev);
	INIT_LIST_HEAD(&acr->hsfw);
	INIT_LIST_HEAD(&acr->lsfw);
	INIT_LIST_HEAD(&acr->lsf);

	fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr);
	if (IS_ERR(fwif))
		return PTR_ERR(fwif);

	acr->func = fwif->func;

	wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1);
	if (wprfw >= 0) {
		int ret = nvkm_acr_ctor_wpr(acr, wprfw);
		if (ret)
			return ret;
	}

	return 0;
}
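/* Example (a minimal sketch): chipset code constructs this subdev through
 * nvkm_acr_new_() with its firmware-selection table, in the style of the
 * GM200 implementation:
 *
 *	int
 *	gm200_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type,
 *		      int inst, struct nvkm_acr **pacr)
 *	{
 *		return nvkm_acr_new_(gm200_acr_fwif, device, type, inst, pacr);
 *	}
 */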