/*
 * Copyright 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/firmware.h>
#include <core/memory.h>
#include <subdev/mmu.h>

static struct nvkm_acr_hsf *
nvkm_acr_hsf_find(struct nvkm_acr *acr, const char *name)
{
	struct nvkm_acr_hsf *hsf;
	list_for_each_entry(hsf, &acr->hsf, head) {
		if (!strcmp(hsf->name, name))
			return hsf;
	}
	return NULL;
}

int
nvkm_acr_hsf_boot(struct nvkm_acr *acr, const char *name)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct nvkm_acr_hsf *hsf;
	int ret;

	hsf = nvkm_acr_hsf_find(acr, name);
	if (!hsf)
		return -EINVAL;

	nvkm_debug(subdev, "executing %s binary\n", hsf->name);
	ret = nvkm_falcon_get(hsf->falcon, subdev);
	if (ret)
		return ret;

	ret = hsf->func->boot(acr, hsf);
	nvkm_falcon_put(hsf->falcon, subdev);
	if (ret) {
		nvkm_error(subdev, "%s binary failed\n", hsf->name);
		return ret;
	}

	nvkm_debug(subdev, "%s binary completed successfully\n", hsf->name);
	return 0;
}

/* Locate the LS falcon, if any, whose firmware provides RTOS services -
 * i.e. one able to bootstrap other LS falcons on our behalf.
 */
static struct nvkm_acr_lsf *
nvkm_acr_rtos(struct nvkm_acr *acr)
{
	struct nvkm_acr_lsf *lsf;

	if (acr) {
		list_for_each_entry(lsf, &acr->lsf, head) {
			if (lsf->func->bootstrap_falcon)
				return lsf;
		}
	}

	return NULL;
}

static void
nvkm_acr_unload(struct nvkm_acr *acr)
{
	if (acr->done) {
		if (acr->rtos) {
			nvkm_subdev_unref(acr->rtos->falcon->owner);
			acr->rtos = NULL;
		}

		nvkm_acr_hsf_boot(acr, "unload");
		acr->done = false;
	}
}

static int
nvkm_acr_load(struct nvkm_acr *acr)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct nvkm_acr_lsf *rtos = nvkm_acr_rtos(acr);
	u64 start, limit;
	int ret;

	if (list_empty(&acr->lsf)) {
		nvkm_debug(subdev, "No LSF(s) present.\n");
		return 0;
	}

	ret = acr->func->init(acr);
	if (ret)
		return ret;

	acr->func->wpr_check(acr, &start, &limit);

	if (start != acr->wpr_start || limit != acr->wpr_end) {
		nvkm_error(subdev, "WPR not configured as expected: "
				   "%016llx-%016llx vs %016llx-%016llx\n",
			   acr->wpr_start, acr->wpr_end, start, limit);
		return -EIO;
	}

	acr->done = true;

	if (rtos) {
		ret = nvkm_subdev_ref(rtos->falcon->owner);
		if (ret)
			return ret;

		acr->rtos = rtos;
	}

	return ret;
}

static int
nvkm_acr_reload(struct nvkm_acr *acr)
{
	nvkm_acr_unload(acr);
	return nvkm_acr_load(acr);
}
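/* Illustrative usage, not part of this file: engine code restarts its LS
 * falcons through the entry point below.  GR, for example, re-bootstraps
 * FECS/GPCCS with something along these lines (a sketch; the mask bits
 * come from enum nvkm_acr_lsf_id):
 *
 *	ret = nvkm_acr_bootstrap_falcons(device, BIT_ULL(NVKM_ACR_LSF_FECS) |
 *						 BIT_ULL(NVKM_ACR_LSF_GPCCS));
 */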
int
nvkm_acr_bootstrap_falcons(struct nvkm_device *device, unsigned long mask)
{
	struct nvkm_acr *acr = device->acr;
	struct nvkm_acr_lsf *rtos = nvkm_acr_rtos(acr);
	unsigned long id;

	/* If there's no LS FW managing bootstrapping of other LS falcons,
	 * we depend on the HS firmware being able to do it instead.
	 */
	if (!rtos) {
		/* Which isn't possible everywhere... */
		if ((mask & acr->func->bootstrap_falcons) == mask) {
			int ret = nvkm_acr_reload(acr);
			if (ret)
				return ret;

			return acr->done ? 0 : -EINVAL;
		}
		return -ENOSYS;
	}

	if ((mask & rtos->func->bootstrap_falcons) != mask)
		return -ENOSYS;

	if (rtos->func->bootstrap_multiple_falcons)
		return rtos->func->bootstrap_multiple_falcons(rtos->falcon, mask);

	for_each_set_bit(id, &mask, NVKM_ACR_LSF_NUM) {
		int ret = rtos->func->bootstrap_falcon(rtos->falcon, id);
		if (ret)
			return ret;
	}

	return 0;
}

bool
nvkm_acr_managed_falcon(struct nvkm_device *device, enum nvkm_acr_lsf_id id)
{
	struct nvkm_acr *acr = device->acr;

	if (acr) {
		if (acr->managed_falcons & BIT_ULL(id))
			return true;
	}

	return false;
}

static int
nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend)
{
	if (!subdev->use.enabled)
		return 0;

	nvkm_acr_unload(nvkm_acr(subdev));
	return 0;
}

static int
nvkm_acr_init(struct nvkm_subdev *subdev)
{
	struct nvkm_acr *acr = nvkm_acr(subdev);

	if (!nvkm_acr_rtos(acr))
		return 0;

	return nvkm_acr_load(acr);
}

static void
nvkm_acr_cleanup(struct nvkm_acr *acr)
{
	nvkm_acr_lsfw_del_all(acr);
	nvkm_acr_hsfw_del_all(acr);
	nvkm_firmware_put(acr->wpr_fw);
	acr->wpr_fw = NULL;
}
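/* Rough shape of the one-time setup below, for orientation (a sketch of
 * the call flow, not additional code paths):
 *
 *	nvkm_acr_oneinit()
 *	-> build acr->lsf from acr->lsfw, culling falcons we can't manage
 *	-> acr->func->wpr_layout()/wpr_alloc(): size and place the WPR image
 *	-> acr->func->wpr_build()/wpr_patch(): fill and relocate the image
 *	-> hsfw->func->load(): map HS blobs into acr->vmm for later booting
 */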
static int
nvkm_acr_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = subdev->device;
	struct nvkm_acr *acr = nvkm_acr(subdev);
	struct nvkm_acr_hsfw *hsfw;
	struct nvkm_acr_lsfw *lsfw, *lsft;
	struct nvkm_acr_lsf *lsf, *rtos;
	u32 wpr_size = 0;
	u64 falcons;
	int ret, i;

	if (list_empty(&acr->hsfw)) {
		nvkm_debug(subdev, "No HSFW(s)\n");
		nvkm_acr_cleanup(acr);
		return 0;
	}

	/* Determine layout/size of WPR image up-front, as we need to know
	 * it to allocate memory before we begin constructing it.
	 */
	list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
		/* Cull unknown falcons that are present in WPR image. */
		if (acr->wpr_fw) {
			if (!lsfw->func) {
				nvkm_acr_lsfw_del(lsfw);
				continue;
			}

			wpr_size = acr->wpr_fw->size;
		}

		/* Ensure we've fetched falcon configuration. */
		ret = nvkm_falcon_get(lsfw->falcon, subdev);
		if (ret)
			return ret;

		nvkm_falcon_put(lsfw->falcon, subdev);

		if (!(lsf = kmalloc(sizeof(*lsf), GFP_KERNEL)))
			return -ENOMEM;
		lsf->func = lsfw->func;
		lsf->falcon = lsfw->falcon;
		lsf->id = lsfw->id;
		list_add_tail(&lsf->head, &acr->lsf);
		acr->managed_falcons |= BIT_ULL(lsf->id);
	}

	/* Ensure the falcon that'll provide ACR functions is booted first. */
	rtos = nvkm_acr_rtos(acr);
	if (rtos) {
		falcons = rtos->func->bootstrap_falcons;
		list_move(&rtos->head, &acr->lsf);
	} else {
		falcons = acr->func->bootstrap_falcons;
	}

	/* Cull falcons that can't be bootstrapped, or the HSFW can fail to
	 * boot and leave the GPU in a weird state.
	 */
	list_for_each_entry_safe(lsfw, lsft, &acr->lsfw, head) {
		if (!(falcons & BIT_ULL(lsfw->id))) {
			nvkm_warn(subdev, "%s falcon cannot be bootstrapped\n",
				  nvkm_acr_lsf_id(lsfw->id));
			nvkm_acr_lsfw_del(lsfw);
		}
	}

	if (!acr->wpr_fw || acr->wpr_comp)
		wpr_size = acr->func->wpr_layout(acr);

	/* Allocate/Locate WPR + fill ucode blob pointer.
	 *
	 *  dGPU: allocate WPR + shadow blob
	 * Tegra: locate WPR with regs, ensure size is sufficient,
	 *        allocate ucode blob.
	 */
	ret = acr->func->wpr_alloc(acr, wpr_size);
	if (ret)
		return ret;

	nvkm_debug(subdev, "WPR region is from 0x%llx-0x%llx (shadow 0x%llx)\n",
		   acr->wpr_start, acr->wpr_end, acr->shadow_start);

	/* Write WPR to ucode blob. */
	nvkm_kmap(acr->wpr);
	if (acr->wpr_fw && !acr->wpr_comp)
		nvkm_wobj(acr->wpr, 0, acr->wpr_fw->data, acr->wpr_fw->size);

	if (!acr->wpr_fw || acr->wpr_comp)
		acr->func->wpr_build(acr, rtos);
	acr->func->wpr_patch(acr, (s64)acr->wpr_start - acr->wpr_prev);

	if (acr->wpr_fw && acr->wpr_comp) {
		nvkm_kmap(acr->wpr);
		for (i = 0; i < acr->wpr_fw->size; i += 4) {
			u32 us = nvkm_ro32(acr->wpr, i);
			u32 fw = ((u32 *)acr->wpr_fw->data)[i/4];
			if (fw != us) {
				nvkm_warn(subdev, "%08x: %08x %08x\n",
					  i, us, fw);
			}
		}
		return -EINVAL;
	}
	nvkm_done(acr->wpr);

	/* Allocate instance block for ACR-related stuff. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
			      &acr->inst);
	if (ret)
		return ret;

	ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "acr", &acr->vmm);
	if (ret)
		return ret;

	acr->vmm->debug = acr->subdev.debug;

	ret = nvkm_vmm_join(acr->vmm, acr->inst);
	if (ret)
		return ret;

	/* Load HS firmware blobs into ACR VMM. */
	list_for_each_entry(hsfw, &acr->hsfw, head) {
		nvkm_debug(subdev, "loading %s fw\n", hsfw->name);
		ret = hsfw->func->load(acr, hsfw);
		if (ret)
			return ret;
	}

	/* Kill temporary data. */
	nvkm_acr_cleanup(acr);
	return 0;
}
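/* Debug aid (assumed invocation, check your kernel): passing a config
 * string such as "NvAcrWpr=0,NvAcrWprCompare=1" via nouveau's config=
 * module parameter makes nvkm_acr_ctor_wpr() below fetch "acr/wpr"
 * version 0, and the comparison loop in nvkm_acr_oneinit() above dump
 * any words where our generated image disagrees with it.
 */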
static void *
nvkm_acr_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_acr *acr = nvkm_acr(subdev);
	struct nvkm_acr_hsf *hsf, *hst;
	struct nvkm_acr_lsf *lsf, *lst;

	list_for_each_entry_safe(hsf, hst, &acr->hsf, head) {
		nvkm_vmm_put(acr->vmm, &hsf->vma);
		nvkm_memory_unref(&hsf->ucode);
		kfree(hsf->imem);
		list_del(&hsf->head);
		kfree(hsf);
	}

	nvkm_vmm_part(acr->vmm, acr->inst);
	nvkm_vmm_unref(&acr->vmm);
	nvkm_memory_unref(&acr->inst);

	nvkm_memory_unref(&acr->wpr);

	list_for_each_entry_safe(lsf, lst, &acr->lsf, head) {
		list_del(&lsf->head);
		kfree(lsf);
	}

	nvkm_acr_cleanup(acr);
	return acr;
}

static const struct nvkm_subdev_func
nvkm_acr = {
	.dtor = nvkm_acr_dtor,
	.oneinit = nvkm_acr_oneinit,
	.init = nvkm_acr_init,
	.fini = nvkm_acr_fini,
};

static int
nvkm_acr_ctor_wpr(struct nvkm_acr *acr, int ver)
{
	struct nvkm_subdev *subdev = &acr->subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	ret = nvkm_firmware_get(subdev, "acr/wpr", ver, &acr->wpr_fw);
	if (ret < 0)
		return ret;

	/* Pre-add LSFs in the order they appear in the FW WPR image so that
	 * we're able to do a binary comparison with our own generator.
	 */
	ret = acr->func->wpr_parse(acr);
	if (ret)
		return ret;

	acr->wpr_comp = nvkm_boolopt(device->cfgopt, "NvAcrWprCompare", false);
	acr->wpr_prev = nvkm_longopt(device->cfgopt, "NvAcrWprPrevAddr", 0);
	return 0;
}

int
nvkm_acr_new_(const struct nvkm_acr_fwif *fwif, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_acr **pacr)
{
	struct nvkm_acr *acr;
	long wprfw;

	if (!(acr = *pacr = kzalloc(sizeof(*acr), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_acr, device, type, inst, &acr->subdev);
	INIT_LIST_HEAD(&acr->hsfw);
	INIT_LIST_HEAD(&acr->lsfw);
	INIT_LIST_HEAD(&acr->hsf);
	INIT_LIST_HEAD(&acr->lsf);

	fwif = nvkm_firmware_load(&acr->subdev, fwif, "Acr", acr);
	if (IS_ERR(fwif))
		return PTR_ERR(fwif);

	acr->func = fwif->func;

	wprfw = nvkm_longopt(device->cfgopt, "NvAcrWpr", -1);
	if (wprfw >= 0) {
		int ret = nvkm_acr_ctor_wpr(acr, wprfw);
		if (ret)
			return ret;
	}

	return 0;
}
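/* Illustrative only: chip-specific variants are expected to wrap this
 * constructor with their own firmware tables, along the lines of the
 * sketch below (fwif table name assumed):
 *
 *	int
 *	gm200_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type,
 *		      int inst, struct nvkm_acr **pacr)
 *	{
 *		return nvkm_acr_new_(gm200_acr_fwif, device, type, inst, pacr);
 *	}
 */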