/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/pci.h>
#include <subdev/timer.h>
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
#include <engine/sec2.h>

#include <nvfw/fw.h>

#include <nvrm/nvtypes.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>

#include <linux/acpi.h>

#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)

struct r535_gsp_msg {
	u8 auth_tag_buffer[16];
	u8 aad_buffer[16];
	u32 checksum; /* r535_gsp_cmdq_get() stashes the payload size here until
		       * r535_gsp_cmdq_push() computes the real checksum. */
	u32 sequence;
	u32 elem_count;
	u32 pad;
	u8 data[];
};

#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)

static int
r535_rpc_status_to_errno(uint32_t rpc_status)
{
	switch (rpc_status) {
	case 0x55: /* NV_ERR_NOT_READY */
	case 0x66: /* NV_ERR_TIMEOUT_RETRY */
		return -EAGAIN;
	case 0x51: /* NV_ERR_NO_MEMORY */
		return
-ENOMEM; 82 default: 83 return -EINVAL; 84 } 85 } 86 87 static void * 88 r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime) 89 { 90 struct r535_gsp_msg *mqe; 91 u32 size, rptr = *gsp->msgq.rptr; 92 int used; 93 u8 *msg; 94 u32 len; 95 96 size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE); 97 if (WARN_ON(!size || size >= gsp->msgq.cnt)) 98 return ERR_PTR(-EINVAL); 99 100 do { 101 u32 wptr = *gsp->msgq.wptr; 102 103 used = wptr + gsp->msgq.cnt - rptr; 104 if (used >= gsp->msgq.cnt) 105 used -= gsp->msgq.cnt; 106 if (used >= size) 107 break; 108 109 usleep_range(1, 2); 110 } while (--(*ptime)); 111 112 if (WARN_ON(!*ptime)) 113 return ERR_PTR(-ETIMEDOUT); 114 115 mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000); 116 117 if (prepc) { 118 *prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe); 119 return mqe->data; 120 } 121 122 msg = kvmalloc(repc, GFP_KERNEL); 123 if (!msg) 124 return ERR_PTR(-ENOMEM); 125 126 len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe); 127 len = min_t(u32, repc, len); 128 memcpy(msg, mqe->data, len); 129 130 rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE); 131 if (rptr == gsp->msgq.cnt) 132 rptr = 0; 133 134 repc -= len; 135 136 if (repc) { 137 mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000); 138 memcpy(msg + len, mqe, repc); 139 140 rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE); 141 } 142 143 mb(); 144 (*gsp->msgq.rptr) = rptr; 145 return msg; 146 } 147 148 static void * 149 r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime) 150 { 151 return r535_gsp_msgq_wait(gsp, repc, NULL, ptime); 152 } 153 154 static int 155 r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv) 156 { 157 struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data); 158 struct r535_gsp_msg *cqe; 159 u32 argc = cmd->checksum; 160 u64 *ptr = (void *)cmd; 161 u64 *end; 162 u64 csum = 0; 163 int free, time = 1000000; 164 u32 wptr, size; 165 u32 off = 0; 166 167 argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE); 168 169 end = (u64 *)((char *)ptr + argc); 170 cmd->pad = 0; 171 cmd->checksum = 0; 172 cmd->sequence = gsp->cmdq.seq++; 173 cmd->elem_count = DIV_ROUND_UP(argc, 0x1000); 174 175 while (ptr < end) 176 csum ^= *ptr++; 177 178 cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum); 179 180 wptr = *gsp->cmdq.wptr; 181 do { 182 do { 183 free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1; 184 if (free >= gsp->cmdq.cnt) 185 free -= gsp->cmdq.cnt; 186 if (free >= 1) 187 break; 188 189 usleep_range(1, 2); 190 } while(--time); 191 192 if (WARN_ON(!time)) { 193 kvfree(cmd); 194 return -ETIMEDOUT; 195 } 196 197 cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000); 198 size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE); 199 memcpy(cqe, (u8 *)cmd + off, size); 200 201 wptr += DIV_ROUND_UP(size, 0x1000); 202 if (wptr == gsp->cmdq.cnt) 203 wptr = 0; 204 205 off += size; 206 argc -= size; 207 } while(argc); 208 209 nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr); 210 wmb(); 211 (*gsp->cmdq.wptr) = wptr; 212 mb(); 213 214 nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000); 215 216 kvfree(cmd); 217 return 0; 218 } 219 220 static void * 221 r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc) 222 { 223 struct r535_gsp_msg *cmd; 224 u32 size = GSP_MSG_HDR_SIZE + argc; 225 226 size = ALIGN(size, GSP_MSG_MIN_SIZE); 227 cmd = kvzalloc(size, GFP_KERNEL); 228 if (!cmd) 229 return ERR_PTR(-ENOMEM); 230 231 cmd->checksum = argc; 232 return cmd->data; 233 } 234 235 struct nvfw_gsp_rpc { 236 u32 header_version; 237 u32 
signature; 238 u32 length; 239 u32 function; 240 u32 rpc_result; 241 u32 rpc_result_private; 242 u32 sequence; 243 union { 244 u32 spare; 245 u32 cpuRmGfid; 246 }; 247 u8 data[]; 248 }; 249 250 static void 251 r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg) 252 { 253 kvfree(msg); 254 } 255 256 static void 257 r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl) 258 { 259 if (gsp->subdev.debug >= lvl) { 260 nvkm_printk__(&gsp->subdev, lvl, info, 261 "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n", 262 msg->function, msg->length, msg->length - sizeof(*msg), 263 msg->rpc_result, msg->rpc_result_private); 264 print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1, 265 msg->data, msg->length - sizeof(*msg), true); 266 } 267 } 268 269 static struct nvfw_gsp_rpc * 270 r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc) 271 { 272 struct nvkm_subdev *subdev = &gsp->subdev; 273 struct nvfw_gsp_rpc *msg; 274 int time = 4000000, i; 275 u32 size; 276 277 retry: 278 msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time); 279 if (IS_ERR_OR_NULL(msg)) 280 return msg; 281 282 msg = r535_gsp_msgq_recv(gsp, msg->length, &time); 283 if (IS_ERR_OR_NULL(msg)) 284 return msg; 285 286 if (msg->rpc_result) { 287 r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR); 288 r535_gsp_msg_done(gsp, msg); 289 return ERR_PTR(-EINVAL); 290 } 291 292 r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE); 293 294 if (fn && msg->function == fn) { 295 if (repc) { 296 if (msg->length < sizeof(*msg) + repc) { 297 nvkm_error(subdev, "msg len %d < %zd\n", 298 msg->length, sizeof(*msg) + repc); 299 r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR); 300 r535_gsp_msg_done(gsp, msg); 301 return ERR_PTR(-EIO); 302 } 303 304 return msg; 305 } 306 307 r535_gsp_msg_done(gsp, msg); 308 return NULL; 309 } 310 311 for (i = 0; i < gsp->msgq.ntfy_nr; i++) { 312 struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i]; 313 314 if (ntfy->fn == msg->function) { 315 if (ntfy->func) 316 ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg)); 317 break; 318 } 319 } 320 321 if (i == gsp->msgq.ntfy_nr) 322 r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN); 323 324 r535_gsp_msg_done(gsp, msg); 325 if (fn) 326 goto retry; 327 328 if (*gsp->msgq.rptr != *gsp->msgq.wptr) 329 goto retry; 330 331 return NULL; 332 } 333 334 static int 335 r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv) 336 { 337 int ret = 0; 338 339 mutex_lock(&gsp->msgq.mutex); 340 if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) { 341 ret = -ENOSPC; 342 } else { 343 gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn; 344 gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func; 345 gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv; 346 gsp->msgq.ntfy_nr++; 347 } 348 mutex_unlock(&gsp->msgq.mutex); 349 return ret; 350 } 351 352 static int 353 r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn) 354 { 355 void *repv; 356 357 mutex_lock(&gsp->cmdq.mutex); 358 repv = r535_gsp_msg_recv(gsp, fn, 0); 359 mutex_unlock(&gsp->cmdq.mutex); 360 if (IS_ERR(repv)) 361 return PTR_ERR(repv); 362 363 return 0; 364 } 365 366 static void * 367 r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) 368 { 369 struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data); 370 struct nvfw_gsp_rpc *msg; 371 u32 fn = rpc->function; 372 void *repv = NULL; 373 int ret; 374 375 if (gsp->subdev.debug >= NV_DBG_TRACE) { 376 nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function, 377 rpc->length, rpc->length - sizeof(*rpc)); 378 
print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1, 379 rpc->data, rpc->length - sizeof(*rpc), true); 380 } 381 382 ret = r535_gsp_cmdq_push(gsp, rpc); 383 if (ret) 384 return ERR_PTR(ret); 385 386 if (wait) { 387 msg = r535_gsp_msg_recv(gsp, fn, repc); 388 if (!IS_ERR_OR_NULL(msg)) 389 repv = msg->data; 390 else 391 repv = msg; 392 } 393 394 return repv; 395 } 396 397 static void 398 r535_gsp_event_dtor(struct nvkm_gsp_event *event) 399 { 400 struct nvkm_gsp_device *device = event->device; 401 struct nvkm_gsp_client *client = device->object.client; 402 struct nvkm_gsp *gsp = client->gsp; 403 404 mutex_lock(&gsp->client_id.mutex); 405 if (event->func) { 406 list_del(&event->head); 407 event->func = NULL; 408 } 409 mutex_unlock(&gsp->client_id.mutex); 410 411 nvkm_gsp_rm_free(&event->object); 412 event->device = NULL; 413 } 414 415 static int 416 r535_gsp_device_event_get(struct nvkm_gsp_event *event) 417 { 418 struct nvkm_gsp_device *device = event->device; 419 NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl; 420 421 ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice, 422 NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl)); 423 if (IS_ERR(ctrl)) 424 return PTR_ERR(ctrl); 425 426 ctrl->event = event->id; 427 ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; 428 return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl); 429 } 430 431 static int 432 r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id, 433 nvkm_gsp_event_func func, struct nvkm_gsp_event *event) 434 { 435 struct nvkm_gsp_client *client = device->object.client; 436 struct nvkm_gsp *gsp = client->gsp; 437 NV0005_ALLOC_PARAMETERS *args; 438 int ret; 439 440 args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle, 441 NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args), 442 &event->object); 443 if (IS_ERR(args)) 444 return PTR_ERR(args); 445 446 args->hParentClient = client->object.handle; 447 args->hSrcResource = 0; 448 args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX; 449 args->notifyIndex = NV01_EVENT_CLIENT_RM | id; 450 args->data = NULL; 451 452 ret = nvkm_gsp_rm_alloc_wr(&event->object, args); 453 if (ret) 454 return ret; 455 456 event->device = device; 457 event->id = id; 458 459 ret = r535_gsp_device_event_get(event); 460 if (ret) { 461 nvkm_gsp_event_dtor(event); 462 return ret; 463 } 464 465 mutex_lock(&gsp->client_id.mutex); 466 event->func = func; 467 list_add(&event->head, &client->events); 468 mutex_unlock(&gsp->client_id.mutex); 469 return 0; 470 } 471 472 static void 473 r535_gsp_device_dtor(struct nvkm_gsp_device *device) 474 { 475 nvkm_gsp_rm_free(&device->subdevice); 476 nvkm_gsp_rm_free(&device->object); 477 } 478 479 static int 480 r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device) 481 { 482 NV2080_ALLOC_PARAMETERS *args; 483 484 return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args), 485 &device->subdevice); 486 } 487 488 static int 489 r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device) 490 { 491 NV0080_ALLOC_PARAMETERS *args; 492 int ret; 493 494 args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args), 495 &device->object); 496 if (IS_ERR(args)) 497 return PTR_ERR(args); 498 499 args->hClientShare = client->object.handle; 500 501 ret = nvkm_gsp_rm_alloc_wr(&device->object, args); 502 if (ret) 503 return ret; 504 505 ret = r535_gsp_subdevice_ctor(device); 506 if (ret) 507 nvkm_gsp_rm_free(&device->object); 508 509 return ret; 510 } 511 512 static void 513 
r535_gsp_client_dtor(struct nvkm_gsp_client *client)
{
	struct nvkm_gsp *gsp = client->gsp;

	nvkm_gsp_rm_free(&client->object);

	mutex_lock(&gsp->client_id.mutex);
	idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff);
	mutex_unlock(&gsp->client_id.mutex);

	client->gsp = NULL;
}

static int
r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
{
	NV0000_ALLOC_PARAMETERS *args;
	int ret;

	mutex_lock(&gsp->client_id.mutex);
	ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
	mutex_unlock(&gsp->client_id.mutex);
	if (ret < 0)
		return ret;

	client->gsp = gsp;
	client->object.client = client;
	INIT_LIST_HEAD(&client->events);

	args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
				     &client->object);
	if (IS_ERR(args)) {
		r535_gsp_client_dtor(client);
		return PTR_ERR(args);
	}

	args->hClient = client->object.handle;
	args->processID = ~0;

	ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
	if (ret) {
		r535_gsp_client_dtor(client);
		return ret;
	}

	return 0;
}

static int
r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
{
	struct nvkm_gsp_client *client = object->client;
	struct nvkm_gsp *gsp = client->gsp;
	rpc_free_v03_00 *rpc;

	nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
		   client->object.handle, object->handle);

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
	if (WARN_ON(IS_ERR_OR_NULL(rpc)))
		return -EIO;

	rpc->params.hRoot = client->object.handle;
	rpc->params.hObjectParent = 0;
	rpc->params.hObjectOld = object->handle;
	return nvkm_gsp_rpc_wr(gsp, rpc, true);
}

static void
r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
{
	rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params);

	nvkm_gsp_rpc_done(object->client->gsp, rpc);
}

static void *
r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
	rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
	struct nvkm_gsp *gsp = object->client->gsp;
	void *ret;

	rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	if (rpc->status) {
		ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
		if (PTR_ERR(ret) != -EAGAIN)
			nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
	} else {
		ret = repc ?
rpc->params : NULL; 606 } 607 608 nvkm_gsp_rpc_done(gsp, rpc); 609 610 return ret; 611 } 612 613 static void * 614 r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc) 615 { 616 struct nvkm_gsp_client *client = object->client; 617 struct nvkm_gsp *gsp = client->gsp; 618 rpc_gsp_rm_alloc_v03_00 *rpc; 619 620 nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n", 621 client->object.handle, object->parent->handle, object->handle, oclass, argc); 622 623 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc); 624 if (IS_ERR(rpc)) 625 return rpc; 626 627 rpc->hClient = client->object.handle; 628 rpc->hParent = object->parent->handle; 629 rpc->hObject = object->handle; 630 rpc->hClass = oclass; 631 rpc->status = 0; 632 rpc->paramsSize = argc; 633 return rpc->params; 634 } 635 636 static void 637 r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv) 638 { 639 rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params); 640 641 if (!repv) 642 return; 643 nvkm_gsp_rpc_done(object->client->gsp, rpc); 644 } 645 646 static int 647 r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc) 648 { 649 rpc_gsp_rm_control_v03_00 *rpc = container_of((*argv), typeof(*rpc), params); 650 struct nvkm_gsp *gsp = object->client->gsp; 651 int ret = 0; 652 653 rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc); 654 if (IS_ERR_OR_NULL(rpc)) { 655 *argv = NULL; 656 return PTR_ERR(rpc); 657 } 658 659 if (rpc->status) { 660 ret = r535_rpc_status_to_errno(rpc->status); 661 if (ret != -EAGAIN) 662 nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", 663 object->client->object.handle, object->handle, rpc->cmd, rpc->status); 664 } 665 666 if (repc) 667 *argv = rpc->params; 668 else 669 nvkm_gsp_rpc_done(gsp, rpc); 670 671 return ret; 672 } 673 674 static void * 675 r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc) 676 { 677 struct nvkm_gsp_client *client = object->client; 678 struct nvkm_gsp *gsp = client->gsp; 679 rpc_gsp_rm_control_v03_00 *rpc; 680 681 nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n", 682 client->object.handle, object->handle, cmd, argc); 683 684 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc); 685 if (IS_ERR(rpc)) 686 return rpc; 687 688 rpc->hClient = client->object.handle; 689 rpc->hObject = object->handle; 690 rpc->cmd = cmd; 691 rpc->status = 0; 692 rpc->paramsSize = argc; 693 return rpc->params; 694 } 695 696 static void 697 r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv) 698 { 699 struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data); 700 701 r535_gsp_msg_done(gsp, rpc); 702 } 703 704 static void * 705 r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc) 706 { 707 struct nvfw_gsp_rpc *rpc; 708 709 rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64))); 710 if (IS_ERR(rpc)) 711 return ERR_CAST(rpc); 712 713 rpc->header_version = 0x03000000; 714 rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V'; 715 rpc->function = fn; 716 rpc->rpc_result = 0xffffffff; 717 rpc->rpc_result_private = 0xffffffff; 718 rpc->length = sizeof(*rpc) + argc; 719 return rpc->data; 720 } 721 722 static void * 723 r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) 724 { 725 struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data); 726 struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), 
data); 727 const u32 max_msg_size = (16 * 0x1000) - sizeof(struct r535_gsp_msg); 728 const u32 max_rpc_size = max_msg_size - sizeof(*rpc); 729 u32 rpc_size = rpc->length - sizeof(*rpc); 730 void *repv; 731 732 mutex_lock(&gsp->cmdq.mutex); 733 if (rpc_size > max_rpc_size) { 734 const u32 fn = rpc->function; 735 736 /* Adjust length, and send initial RPC. */ 737 rpc->length = sizeof(*rpc) + max_rpc_size; 738 cmd->checksum = rpc->length; 739 740 repv = r535_gsp_rpc_send(gsp, argv, false, 0); 741 if (IS_ERR(repv)) 742 goto done; 743 744 argv += max_rpc_size; 745 rpc_size -= max_rpc_size; 746 747 /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */ 748 while (rpc_size) { 749 u32 size = min(rpc_size, max_rpc_size); 750 void *next; 751 752 next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size); 753 if (IS_ERR(next)) { 754 repv = next; 755 goto done; 756 } 757 758 memcpy(next, argv, size); 759 760 repv = r535_gsp_rpc_send(gsp, next, false, 0); 761 if (IS_ERR(repv)) 762 goto done; 763 764 argv += size; 765 rpc_size -= size; 766 } 767 768 /* Wait for reply. */ 769 if (wait) { 770 rpc = r535_gsp_msg_recv(gsp, fn, repc); 771 if (!IS_ERR_OR_NULL(rpc)) 772 repv = rpc->data; 773 else 774 repv = rpc; 775 } else { 776 repv = NULL; 777 } 778 } else { 779 repv = r535_gsp_rpc_send(gsp, argv, wait, repc); 780 } 781 782 done: 783 mutex_unlock(&gsp->cmdq.mutex); 784 return repv; 785 } 786 787 const struct nvkm_gsp_rm 788 r535_gsp_rm = { 789 .rpc_get = r535_gsp_rpc_get, 790 .rpc_push = r535_gsp_rpc_push, 791 .rpc_done = r535_gsp_rpc_done, 792 793 .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get, 794 .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push, 795 .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done, 796 797 .rm_alloc_get = r535_gsp_rpc_rm_alloc_get, 798 .rm_alloc_push = r535_gsp_rpc_rm_alloc_push, 799 .rm_alloc_done = r535_gsp_rpc_rm_alloc_done, 800 801 .rm_free = r535_gsp_rpc_rm_free, 802 803 .client_ctor = r535_gsp_client_ctor, 804 .client_dtor = r535_gsp_client_dtor, 805 806 .device_ctor = r535_gsp_device_ctor, 807 .device_dtor = r535_gsp_device_dtor, 808 809 .event_ctor = r535_gsp_device_event_ctor, 810 .event_dtor = r535_gsp_event_dtor, 811 }; 812 813 static void 814 r535_gsp_msgq_work(struct work_struct *work) 815 { 816 struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work); 817 818 mutex_lock(&gsp->cmdq.mutex); 819 if (*gsp->msgq.rptr != *gsp->msgq.wptr) 820 r535_gsp_msg_recv(gsp, 0, 0); 821 mutex_unlock(&gsp->cmdq.mutex); 822 } 823 824 static irqreturn_t 825 r535_gsp_intr(struct nvkm_inth *inth) 826 { 827 struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth); 828 struct nvkm_subdev *subdev = &gsp->subdev; 829 u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008); 830 u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 + 831 gsp->falcon.func->riscv_irqmask); 832 u32 stat = intr & inte; 833 834 if (!stat) { 835 nvkm_debug(subdev, "inte %08x %08x\n", intr, inte); 836 return IRQ_NONE; 837 } 838 839 if (stat & 0x00000040) { 840 nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040); 841 schedule_work(&gsp->msgq.work); 842 stat &= ~0x00000040; 843 } 844 845 if (stat) { 846 nvkm_error(subdev, "intr %08x\n", stat); 847 nvkm_falcon_wr32(&gsp->falcon, 0x014, stat); 848 nvkm_falcon_wr32(&gsp->falcon, 0x004, stat); 849 } 850 851 nvkm_falcon_intr_retrigger(&gsp->falcon); 852 return IRQ_HANDLED; 853 } 854 855 static int 856 r535_gsp_intr_get_table(struct nvkm_gsp *gsp) 857 { 858 NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl; 859 int ret = 0; 860 861 ctrl = 
nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, 862 NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl)); 863 if (IS_ERR(ctrl)) 864 return PTR_ERR(ctrl); 865 866 ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl)); 867 if (WARN_ON(ret)) { 868 nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); 869 return ret; 870 } 871 872 for (unsigned i = 0; i < ctrl->tableLen; i++) { 873 enum nvkm_subdev_type type; 874 int inst; 875 876 nvkm_debug(&gsp->subdev, 877 "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i, 878 ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask, 879 ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall); 880 881 switch (ctrl->table[i].engineIdx) { 882 case MC_ENGINE_IDX_GSP: 883 type = NVKM_SUBDEV_GSP; 884 inst = 0; 885 break; 886 case MC_ENGINE_IDX_DISP: 887 type = NVKM_ENGINE_DISP; 888 inst = 0; 889 break; 890 case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9: 891 type = NVKM_ENGINE_CE; 892 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0; 893 break; 894 case MC_ENGINE_IDX_GR0: 895 type = NVKM_ENGINE_GR; 896 inst = 0; 897 break; 898 case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: 899 type = NVKM_ENGINE_NVDEC; 900 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0; 901 break; 902 case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2: 903 type = NVKM_ENGINE_NVENC; 904 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC; 905 break; 906 case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7: 907 type = NVKM_ENGINE_NVJPG; 908 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0; 909 break; 910 case MC_ENGINE_IDX_OFA0: 911 type = NVKM_ENGINE_OFA; 912 inst = 0; 913 break; 914 default: 915 continue; 916 } 917 918 if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) { 919 ret = -ENOSPC; 920 break; 921 } 922 923 gsp->intr[gsp->intr_nr].type = type; 924 gsp->intr[gsp->intr_nr].inst = inst; 925 gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall; 926 gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall; 927 gsp->intr_nr++; 928 } 929 930 nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); 931 return ret; 932 } 933 934 static int 935 r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) 936 { 937 GspStaticConfigInfo *rpc; 938 int last_usable = -1; 939 940 rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); 941 if (IS_ERR(rpc)) 942 return PTR_ERR(rpc); 943 944 gsp->internal.client.object.client = &gsp->internal.client; 945 gsp->internal.client.object.parent = NULL; 946 gsp->internal.client.object.handle = rpc->hInternalClient; 947 gsp->internal.client.gsp = gsp; 948 949 gsp->internal.device.object.client = &gsp->internal.client; 950 gsp->internal.device.object.parent = &gsp->internal.client.object; 951 gsp->internal.device.object.handle = rpc->hInternalDevice; 952 953 gsp->internal.device.subdevice.client = &gsp->internal.client; 954 gsp->internal.device.subdevice.parent = &gsp->internal.device.object; 955 gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; 956 957 gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; 958 gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; 959 960 for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) { 961 NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = 962 &rpc->fbRegionInfoParams.fbRegion[i]; 963 964 nvkm_debug(&gsp->subdev, "fb region %d: " 965 "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i, 966 reg->base, reg->limit, reg->reserved, reg->performance, 967 reg->supportCompressed, 
reg->supportISO, reg->bProtected); 968 969 if (!reg->reserved && !reg->bProtected) { 970 if (reg->supportCompressed && reg->supportISO && 971 !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) { 972 const u64 size = (reg->limit + 1) - reg->base; 973 974 gsp->fb.region[gsp->fb.region_nr].addr = reg->base; 975 gsp->fb.region[gsp->fb.region_nr].size = size; 976 gsp->fb.region_nr++; 977 } 978 979 last_usable = i; 980 } 981 } 982 983 if (last_usable >= 0) { 984 u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1; 985 986 gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base; 987 } 988 989 for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) { 990 if (rpc->gpcInfo.gpcMask & BIT(gpc)) { 991 gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask); 992 gsp->gr.gpcs++; 993 } 994 } 995 996 nvkm_gsp_rpc_done(gsp, rpc); 997 return 0; 998 } 999 1000 static void 1001 nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) 1002 { 1003 if (mem->data) { 1004 /* 1005 * Poison the buffer to catch any unexpected access from 1006 * GSP-RM if the buffer was prematurely freed. 1007 */ 1008 memset(mem->data, 0xFF, mem->size); 1009 1010 dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); 1011 memset(mem, 0, sizeof(*mem)); 1012 } 1013 } 1014 1015 static int 1016 nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem) 1017 { 1018 mem->size = size; 1019 mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); 1020 if (WARN_ON(!mem->data)) 1021 return -ENOMEM; 1022 1023 return 0; 1024 } 1025 1026 static int 1027 r535_gsp_postinit(struct nvkm_gsp *gsp) 1028 { 1029 struct nvkm_device *device = gsp->subdev.device; 1030 int ret; 1031 1032 ret = r535_gsp_rpc_get_gsp_static_info(gsp); 1033 if (WARN_ON(ret)) 1034 return ret; 1035 1036 INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work); 1037 1038 ret = r535_gsp_intr_get_table(gsp); 1039 if (WARN_ON(ret)) 1040 return ret; 1041 1042 ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst); 1043 if (WARN_ON(ret < 0)) 1044 return ret; 1045 1046 ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev, 1047 r535_gsp_intr, &gsp->subdev.inth); 1048 if (WARN_ON(ret)) 1049 return ret; 1050 1051 nvkm_inth_allow(&gsp->subdev.inth); 1052 nvkm_wr32(device, 0x110004, 0x00000040); 1053 1054 /* Release the DMA buffers that were needed only for boot and init */ 1055 nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw); 1056 nvkm_gsp_mem_dtor(gsp, &gsp->libos); 1057 nvkm_gsp_mem_dtor(gsp, &gsp->rmargs); 1058 nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta); 1059 1060 return ret; 1061 } 1062 1063 static int 1064 r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend) 1065 { 1066 rpc_unloading_guest_driver_v1F_07 *rpc; 1067 1068 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc)); 1069 if (IS_ERR(rpc)) 1070 return PTR_ERR(rpc); 1071 1072 if (suspend) { 1073 rpc->bInPMTransition = 1; 1074 rpc->bGc6Entering = 0; 1075 rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; 1076 } else { 1077 rpc->bInPMTransition = 0; 1078 rpc->bGc6Entering = 0; 1079 rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0; 1080 } 1081 1082 return nvkm_gsp_rpc_wr(gsp, rpc, true); 1083 } 1084 1085 /* dword only */ 1086 struct nv_gsp_registry_entries { 1087 const char *name; 1088 u32 value; 1089 }; 1090 1091 static const struct nv_gsp_registry_entries r535_registry_entries[] = { 1092 { "RMSecBusResetEnable", 1 }, 1093 { 
"RMForcePcieConfigSave", 1 }, 1094 }; 1095 #define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries) 1096 1097 static int 1098 r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) 1099 { 1100 PACKED_REGISTRY_TABLE *rpc; 1101 char *strings; 1102 int str_offset; 1103 int i; 1104 size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES); 1105 1106 /* add strings + null terminator */ 1107 for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) 1108 rpc_size += strlen(r535_registry_entries[i].name) + 1; 1109 1110 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, rpc_size); 1111 if (IS_ERR(rpc)) 1112 return PTR_ERR(rpc); 1113 1114 rpc->numEntries = NV_GSP_REG_NUM_ENTRIES; 1115 1116 str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]); 1117 strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES]; 1118 for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) { 1119 int name_len = strlen(r535_registry_entries[i].name) + 1; 1120 1121 rpc->entries[i].nameOffset = str_offset; 1122 rpc->entries[i].type = 1; 1123 rpc->entries[i].data = r535_registry_entries[i].value; 1124 rpc->entries[i].length = 4; 1125 memcpy(strings, r535_registry_entries[i].name, name_len); 1126 strings += name_len; 1127 str_offset += name_len; 1128 } 1129 rpc->size = str_offset; 1130 1131 return nvkm_gsp_rpc_wr(gsp, rpc, false); 1132 } 1133 1134 #if defined(CONFIG_ACPI) && defined(CONFIG_X86) 1135 static void 1136 r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) 1137 { 1138 const guid_t NVOP_DSM_GUID = 1139 GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B, 1140 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0); 1141 u64 NVOP_DSM_REV = 0x00000100; 1142 union acpi_object argv4 = { 1143 .buffer.type = ACPI_TYPE_BUFFER, 1144 .buffer.length = 4, 1145 .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), 1146 }, *obj; 1147 1148 caps->status = 0xffff; 1149 1150 if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) 1151 return; 1152 1153 obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); 1154 if (!obj) 1155 return; 1156 1157 if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || 1158 WARN_ON(obj->buffer.length != 4)) 1159 return; 1160 1161 caps->status = 0; 1162 caps->optimusCaps = *(u32 *)obj->buffer.pointer; 1163 1164 ACPI_FREE(obj); 1165 1166 kfree(argv4.buffer.pointer); 1167 } 1168 1169 static void 1170 r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) 1171 { 1172 const guid_t JT_DSM_GUID = 1173 GUID_INIT(0xCBECA351L, 0x067B, 0x4924, 1174 0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34); 1175 u64 JT_DSM_REV = 0x00000103; 1176 u32 caps; 1177 union acpi_object argv4 = { 1178 .buffer.type = ACPI_TYPE_BUFFER, 1179 .buffer.length = sizeof(caps), 1180 .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), 1181 }, *obj; 1182 1183 jt->status = 0xffff; 1184 1185 obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); 1186 if (!obj) 1187 return; 1188 1189 if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || 1190 WARN_ON(obj->buffer.length != 4)) 1191 return; 1192 1193 jt->status = 0; 1194 jt->jtCaps = *(u32 *)obj->buffer.pointer; 1195 jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; 1196 jt->bSBIOSCaps = 0; 1197 1198 ACPI_FREE(obj); 1199 1200 kfree(argv4.buffer.pointer); 1201 } 1202 1203 static void 1204 r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, 1205 MUX_METHOD_DATA_ELEMENT *part) 1206 { 1207 union acpi_object mux_arg = { ACPI_TYPE_INTEGER }; 1208 struct acpi_object_list input = { 1, &mux_arg }; 1209 acpi_handle iter = NULL, 
handle_mux = NULL; 1210 acpi_status status; 1211 unsigned long long value; 1212 1213 mode->status = 0xffff; 1214 part->status = 0xffff; 1215 1216 do { 1217 status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter); 1218 if (ACPI_FAILURE(status) || !iter) 1219 return; 1220 1221 status = acpi_evaluate_integer(iter, "_ADR", NULL, &value); 1222 if (ACPI_FAILURE(status) || value != id) 1223 continue; 1224 1225 handle_mux = iter; 1226 } while (!handle_mux); 1227 1228 if (!handle_mux) 1229 return; 1230 1231 /* I -think- 0 means "acquire" according to nvidia's driver source */ 1232 input.pointer->integer.type = ACPI_TYPE_INTEGER; 1233 input.pointer->integer.value = 0; 1234 1235 status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value); 1236 if (ACPI_SUCCESS(status)) { 1237 mode->acpiId = id; 1238 mode->mode = value; 1239 mode->status = 0; 1240 } 1241 1242 status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value); 1243 if (ACPI_SUCCESS(status)) { 1244 part->acpiId = id; 1245 part->mode = value; 1246 part->status = 0; 1247 } 1248 } 1249 1250 static void 1251 r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux) 1252 { 1253 mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]); 1254 1255 for (int i = 0; i < mux->tableLen; i++) { 1256 r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i], 1257 &mux->acpiIdMuxPartTable[i]); 1258 } 1259 } 1260 1261 static void 1262 r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod) 1263 { 1264 acpi_status status; 1265 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 1266 union acpi_object *_DOD; 1267 1268 dod->status = 0xffff; 1269 1270 status = acpi_evaluate_object(handle, "_DOD", NULL, &output); 1271 if (ACPI_FAILURE(status)) 1272 return; 1273 1274 _DOD = output.pointer; 1275 1276 if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) || 1277 WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList))) 1278 return; 1279 1280 for (int i = 0; i < _DOD->package.count; i++) { 1281 if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER)) 1282 return; 1283 1284 dod->acpiIdList[i] = _DOD->package.elements[i].integer.value; 1285 dod->acpiIdListLen += sizeof(dod->acpiIdList[0]); 1286 } 1287 1288 dod->status = 0; 1289 kfree(output.pointer); 1290 } 1291 #endif 1292 1293 static void 1294 r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi) 1295 { 1296 #if defined(CONFIG_ACPI) && defined(CONFIG_X86) 1297 acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev); 1298 1299 if (!handle) 1300 return; 1301 1302 acpi->bValid = 1; 1303 1304 r535_gsp_acpi_dod(handle, &acpi->dodMethodData); 1305 if (acpi->dodMethodData.status == 0) 1306 r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData); 1307 1308 r535_gsp_acpi_jt(handle, &acpi->jtMethodData); 1309 r535_gsp_acpi_caps(handle, &acpi->capsMethodData); 1310 #endif 1311 } 1312 1313 static int 1314 r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) 1315 { 1316 struct nvkm_device *device = gsp->subdev.device; 1317 struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device); 1318 GspSystemInfo *info; 1319 1320 if (WARN_ON(device->type == NVKM_DEVICE_TEGRA)) 1321 return -ENOSYS; 1322 1323 info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info)); 1324 if (IS_ERR(info)) 1325 return PTR_ERR(info); 1326 1327 info->gpuPhysAddr = device->func->resource_addr(device, 0); 1328 info->gpuPhysFbAddr = device->func->resource_addr(device, 1); 1329 info->gpuPhysInstAddr = 
device->func->resource_addr(device, 3); 1330 info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev); 1331 info->maxUserVa = TASK_SIZE; 1332 info->pciConfigMirrorBase = 0x088000; 1333 info->pciConfigMirrorSize = 0x001000; 1334 r535_gsp_acpi_info(gsp, &info->acpiMethodData); 1335 1336 return nvkm_gsp_rpc_wr(gsp, info, false); 1337 } 1338 1339 static int 1340 r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc) 1341 { 1342 struct nvkm_gsp *gsp = priv; 1343 struct nvkm_subdev *subdev = &gsp->subdev; 1344 rpc_os_error_log_v17_00 *msg = repv; 1345 1346 if (WARN_ON(repc < sizeof(*msg))) 1347 return -EINVAL; 1348 1349 nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString); 1350 return 0; 1351 } 1352 1353 static int 1354 r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) 1355 { 1356 rpc_rc_triggered_v17_02 *msg = repv; 1357 struct nvkm_gsp *gsp = priv; 1358 struct nvkm_subdev *subdev = &gsp->subdev; 1359 struct nvkm_chan *chan; 1360 unsigned long flags; 1361 1362 if (WARN_ON(repc < sizeof(*msg))) 1363 return -EINVAL; 1364 1365 nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n", 1366 msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, 1367 msg->partitionAttributionId); 1368 1369 chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags); 1370 if (!chan) { 1371 nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid); 1372 return 0; 1373 } 1374 1375 nvkm_chan_error(chan, false); 1376 nvkm_chan_put(&chan, flags); 1377 return 0; 1378 } 1379 1380 static int 1381 r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc) 1382 { 1383 struct nvkm_gsp *gsp = priv; 1384 struct nvkm_subdev *subdev = &gsp->subdev; 1385 1386 WARN_ON(repc != 0); 1387 1388 nvkm_error(subdev, "mmu fault queued\n"); 1389 return 0; 1390 } 1391 1392 static int 1393 r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc) 1394 { 1395 struct nvkm_gsp *gsp = priv; 1396 struct nvkm_gsp_client *client; 1397 struct nvkm_subdev *subdev = &gsp->subdev; 1398 rpc_post_event_v17_00 *msg = repv; 1399 1400 if (WARN_ON(repc < sizeof(*msg))) 1401 return -EINVAL; 1402 if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize)) 1403 return -EINVAL; 1404 1405 nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n", 1406 msg->hClient, msg->hEvent, msg->notifyIndex, msg->data, 1407 msg->status, msg->eventDataSize, msg->bNotifyList); 1408 1409 mutex_lock(&gsp->client_id.mutex); 1410 client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff); 1411 if (client) { 1412 struct nvkm_gsp_event *event; 1413 bool handled = false; 1414 1415 list_for_each_entry(event, &client->events, head) { 1416 if (event->object.handle == msg->hEvent) { 1417 event->func(event, msg->eventData, msg->eventDataSize); 1418 handled = true; 1419 } 1420 } 1421 1422 if (!handled) { 1423 nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n", 1424 msg->hClient, msg->hEvent); 1425 } 1426 } else { 1427 nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient); 1428 } 1429 mutex_unlock(&gsp->client_id.mutex); 1430 return 0; 1431 } 1432 1433 /** 1434 * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP 1435 * 1436 * The GSP sequencer is a list of I/O commands that the GSP can send to 1437 * the driver to perform for various purposes. The most common usage is to 1438 * perform a special mid-initialization reset. 
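 *
 * Each command in the buffer is a GSP_SEQUENCER_BUFFER_CMD: an opcode dword
 * followed by an opcode-specific payload (register write/modify/poll, delay,
 * register save, and core reset/start/wait-for-halt/resume operations).
 * Commands are consumed below until seq->cmdIndex dwords have been processed.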
1439 */ 1440 static int 1441 r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) 1442 { 1443 struct nvkm_gsp *gsp = priv; 1444 struct nvkm_subdev *subdev = &gsp->subdev; 1445 struct nvkm_device *device = subdev->device; 1446 rpc_run_cpu_sequencer_v17_00 *seq = repv; 1447 int ptr = 0, ret; 1448 1449 nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex); 1450 1451 while (ptr < seq->cmdIndex) { 1452 GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr]; 1453 1454 ptr += 1; 1455 ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode); 1456 1457 switch (cmd->opCode) { 1458 case GSP_SEQ_BUF_OPCODE_REG_WRITE: { 1459 u32 addr = cmd->payload.regWrite.addr; 1460 u32 data = cmd->payload.regWrite.val; 1461 1462 nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data); 1463 nvkm_wr32(device, addr, data); 1464 } 1465 break; 1466 case GSP_SEQ_BUF_OPCODE_REG_MODIFY: { 1467 u32 addr = cmd->payload.regModify.addr; 1468 u32 mask = cmd->payload.regModify.mask; 1469 u32 data = cmd->payload.regModify.val; 1470 1471 nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data); 1472 nvkm_mask(device, addr, mask, data); 1473 } 1474 break; 1475 case GSP_SEQ_BUF_OPCODE_REG_POLL: { 1476 u32 addr = cmd->payload.regPoll.addr; 1477 u32 mask = cmd->payload.regPoll.mask; 1478 u32 data = cmd->payload.regPoll.val; 1479 u32 usec = cmd->payload.regPoll.timeout ?: 4000000; 1480 //u32 error = cmd->payload.regPoll.error; 1481 1482 nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec); 1483 nvkm_rd32(device, addr); 1484 nvkm_usec(device, usec, 1485 if ((nvkm_rd32(device, addr) & mask) == data) 1486 break; 1487 ); 1488 } 1489 break; 1490 case GSP_SEQ_BUF_OPCODE_DELAY_US: { 1491 u32 usec = cmd->payload.delayUs.val; 1492 1493 nvkm_trace(subdev, "seq usec %d\n", usec); 1494 udelay(usec); 1495 } 1496 break; 1497 case GSP_SEQ_BUF_OPCODE_REG_STORE: { 1498 u32 addr = cmd->payload.regStore.addr; 1499 u32 slot = cmd->payload.regStore.index; 1500 1501 seq->regSaveArea[slot] = nvkm_rd32(device, addr); 1502 nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot, 1503 seq->regSaveArea[slot]); 1504 } 1505 break; 1506 case GSP_SEQ_BUF_OPCODE_CORE_RESET: 1507 nvkm_trace(subdev, "seq core reset\n"); 1508 nvkm_falcon_reset(&gsp->falcon); 1509 nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080); 1510 nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000); 1511 break; 1512 case GSP_SEQ_BUF_OPCODE_CORE_START: 1513 nvkm_trace(subdev, "seq core start\n"); 1514 if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040) 1515 nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002); 1516 else 1517 nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002); 1518 break; 1519 case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: 1520 nvkm_trace(subdev, "seq core wait halt\n"); 1521 nvkm_msec(device, 2000, 1522 if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010) 1523 break; 1524 ); 1525 break; 1526 case GSP_SEQ_BUF_OPCODE_CORE_RESUME: { 1527 struct nvkm_sec2 *sec2 = device->sec2; 1528 u32 mbox0; 1529 1530 nvkm_trace(subdev, "seq core resume\n"); 1531 1532 ret = gsp->func->reset(gsp); 1533 if (WARN_ON(ret)) 1534 return ret; 1535 1536 nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); 1537 nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); 1538 1539 nvkm_falcon_start(&sec2->falcon); 1540 1541 if (nvkm_msec(device, 2000, 1542 if (nvkm_rd32(device, 0x1180f8) & 0x04000000) 1543 break; 1544 ) < 0) 1545 return -ETIMEDOUT; 1546 1547 mbox0 = 
nvkm_falcon_rd32(&sec2->falcon, 0x040); 1548 if (WARN_ON(mbox0)) { 1549 nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0); 1550 return -EIO; 1551 } 1552 1553 nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); 1554 1555 if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) 1556 return -EIO; 1557 } 1558 break; 1559 default: 1560 nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode); 1561 return -EINVAL; 1562 } 1563 } 1564 1565 return 0; 1566 } 1567 1568 static int 1569 r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) 1570 { 1571 struct nvkm_subdev *subdev = &gsp->subdev; 1572 struct nvkm_device *device = subdev->device; 1573 u32 wpr2_hi; 1574 int ret; 1575 1576 wpr2_hi = nvkm_rd32(device, 0x1fa828); 1577 if (!wpr2_hi) { 1578 nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n"); 1579 return 0; 1580 } 1581 1582 ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); 1583 if (WARN_ON(ret)) 1584 return ret; 1585 1586 wpr2_hi = nvkm_rd32(device, 0x1fa828); 1587 if (WARN_ON(wpr2_hi)) 1588 return -EIO; 1589 1590 return 0; 1591 } 1592 1593 static int 1594 r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) 1595 { 1596 int ret; 1597 1598 ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); 1599 if (ret) 1600 return ret; 1601 1602 nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); 1603 1604 if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) 1605 return -EIO; 1606 1607 return 0; 1608 } 1609 1610 static int 1611 r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp) 1612 { 1613 GspFwWprMeta *meta; 1614 int ret; 1615 1616 ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta); 1617 if (ret) 1618 return ret; 1619 1620 meta = gsp->wpr_meta.data; 1621 1622 meta->magic = GSP_FW_WPR_META_MAGIC; 1623 meta->revision = GSP_FW_WPR_META_REVISION; 1624 1625 meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr; 1626 meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; 1627 1628 meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; 1629 meta->sizeOfBootloader = gsp->boot.fw.size; 1630 meta->bootloaderCodeOffset = gsp->boot.code_offset; 1631 meta->bootloaderDataOffset = gsp->boot.data_offset; 1632 meta->bootloaderManifestOffset = gsp->boot.manifest_offset; 1633 1634 meta->sysmemAddrOfSignature = gsp->sig.addr; 1635 meta->sizeOfSignature = gsp->sig.size; 1636 1637 meta->gspFwRsvdStart = gsp->fb.heap.addr; 1638 meta->nonWprHeapOffset = gsp->fb.heap.addr; 1639 meta->nonWprHeapSize = gsp->fb.heap.size; 1640 meta->gspFwWprStart = gsp->fb.wpr2.addr; 1641 meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr; 1642 meta->gspFwHeapSize = gsp->fb.wpr2.heap.size; 1643 meta->gspFwOffset = gsp->fb.wpr2.elf.addr; 1644 meta->bootBinOffset = gsp->fb.wpr2.boot.addr; 1645 meta->frtsOffset = gsp->fb.wpr2.frts.addr; 1646 meta->frtsSize = gsp->fb.wpr2.frts.size; 1647 meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000); 1648 meta->fbSize = gsp->fb.size; 1649 meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr; 1650 meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; 1651 meta->bootCount = 0; 1652 meta->partitionRpcAddr = 0; 1653 meta->partitionRpcRequestOffset = 0; 1654 meta->partitionRpcReplyOffset = 0; 1655 meta->verified = 0; 1656 return 0; 1657 } 1658 1659 static int 1660 r535_gsp_shared_init(struct nvkm_gsp *gsp) 1661 { 1662 struct { 1663 msgqTxHeader tx; 1664 msgqRxHeader rx; 1665 } *cmdq, *msgq; 1666 int ret, i; 1667 1668 gsp->shm.cmdq.size = 0x40000; 1669 
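	/*
	 * Layout note (with 4 KiB GSP pages): each queue set up here is
	 * 0x40000 bytes, i.e. 64 pages: one page holding the msgqTxHeader/
	 * msgqRxHeader pair followed by 63 page-sized message slots
	 * (msgCount, below).  The shared buffer is PTE array + cmdq + msgq,
	 * and the PTE array also maps its own page(s), hence the extra
	 * DIV_ROUND_UP() term added to ptes.nr.
	 */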
	gsp->shm.msgq.size = 0x40000;

	gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT;
	gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
	gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);

	ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size +
				     gsp->shm.cmdq.size +
				     gsp->shm.msgq.size,
				&gsp->shm.mem);
	if (ret)
		return ret;

	gsp->shm.ptes.ptr = gsp->shm.mem.data;
	gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size;
	gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size;

	for (i = 0; i < gsp->shm.ptes.nr; i++)
		gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT);

	cmdq = gsp->shm.cmdq.ptr;
	cmdq->tx.version = 0;
	cmdq->tx.size = gsp->shm.cmdq.size;
	cmdq->tx.entryOff = GSP_PAGE_SIZE;
	cmdq->tx.msgSize = GSP_PAGE_SIZE;
	cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize;
	cmdq->tx.writePtr = 0;
	cmdq->tx.flags = 1;
	cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr);

	msgq = gsp->shm.msgq.ptr;

	gsp->cmdq.cnt = cmdq->tx.msgCount;
	gsp->cmdq.wptr = &cmdq->tx.writePtr;
	gsp->cmdq.rptr = &msgq->rx.readPtr;
	gsp->msgq.cnt = cmdq->tx.msgCount;
	gsp->msgq.wptr = &msgq->tx.writePtr;
	gsp->msgq.rptr = &cmdq->rx.readPtr;
	return 0;
}

static int
r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
{
	GSP_ARGUMENTS_CACHED *args;
	int ret;

	if (!resume) {
		ret = r535_gsp_shared_init(gsp);
		if (ret)
			return ret;

		ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
		if (ret)
			return ret;
	}

	args = gsp->rmargs.data;
	args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
	args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
	args->messageQueueInitArguments.cmdQueueOffset =
		(u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
	args->messageQueueInitArguments.statQueueOffset =
		(u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;

	if (!resume) {
		args->srInitArguments.oldLevel = 0;
		args->srInitArguments.flags = 0;
		args->srInitArguments.bInPMTransition = 0;
	} else {
		args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
		args->srInitArguments.flags = 0;
		args->srInitArguments.bInPMTransition = 1;
	}

	return 0;
}

static inline u64
r535_gsp_libos_id8(const char *name)
{
	u64 id = 0;

	for (int i = 0; i < sizeof(id) && *name; i++, name++)
		id = (id << 8) | *name;

	return id;
}

/**
 * create_pte_array() - creates a PTE array of a physically contiguous buffer
 * @ptes: pointer to the array
 * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned)
 * @size: size of the buffer
 *
 * GSP-RM sometimes expects physically-contiguous buffers to have an array of
 * "PTEs" for each page in that buffer. Although in theory that allows for
 * the buffer to be physically discontiguous, GSP-RM does not currently
 * support that.
 *
 * In this case, the PTEs are DMA addresses of each page of the buffer. Since
 * the buffer is physically contiguous, calculating all the PTEs is simple
 * math.
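 *
 * For example, with 4 KiB GSP pages, a 12 KiB buffer at DMA address A gets
 * three PTEs: A, A + 0x1000 and A + 0x2000.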
1772 * 1773 * See memdescGetPhysAddrsForGpu() 1774 */ 1775 static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size) 1776 { 1777 unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE); 1778 unsigned int i; 1779 1780 for (i = 0; i < num_pages; i++) 1781 ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT); 1782 } 1783 1784 /** 1785 * r535_gsp_libos_init() -- create the libos arguments structure 1786 * 1787 * The logging buffers are byte queues that contain encoded printf-like 1788 * messages from GSP-RM. They need to be decoded by a special application 1789 * that can parse the buffers. 1790 * 1791 * The 'loginit' buffer contains logs from early GSP-RM init and 1792 * exception dumps. The 'logrm' buffer contains the subsequent logs. Both are 1793 * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE. 1794 * 1795 * The physical address map for the log buffer is stored in the buffer 1796 * itself, starting with offset 1. Offset 0 contains the "put" pointer. 1797 * 1798 * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is 1799 * configured for a larger page size (e.g. 64K pages), we need to give 1800 * the GSP an array of 4K pages. Fortunately, since the buffer is 1801 * physically contiguous, it's simple math to calculate the addresses. 1802 * 1803 * The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently 1804 * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the 1805 * buffers to be physically contiguous anyway. 1806 * 1807 * The memory allocated for the arguments must remain until the GSP sends the 1808 * init_done RPC. 1809 * 1810 * See _kgspInitLibosLoggingStructures (allocates memory for buffers) 1811 * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array) 1812 */ 1813 static int 1814 r535_gsp_libos_init(struct nvkm_gsp *gsp) 1815 { 1816 LibosMemoryRegionInitArgument *args; 1817 int ret; 1818 1819 ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos); 1820 if (ret) 1821 return ret; 1822 1823 args = gsp->libos.data; 1824 1825 ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit); 1826 if (ret) 1827 return ret; 1828 1829 args[0].id8 = r535_gsp_libos_id8("LOGINIT"); 1830 args[0].pa = gsp->loginit.addr; 1831 args[0].size = gsp->loginit.size; 1832 args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; 1833 args[0].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; 1834 create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size); 1835 1836 ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr); 1837 if (ret) 1838 return ret; 1839 1840 args[1].id8 = r535_gsp_libos_id8("LOGINTR"); 1841 args[1].pa = gsp->logintr.addr; 1842 args[1].size = gsp->logintr.size; 1843 args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; 1844 args[1].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; 1845 create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size); 1846 1847 ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm); 1848 if (ret) 1849 return ret; 1850 1851 args[2].id8 = r535_gsp_libos_id8("LOGRM"); 1852 args[2].pa = gsp->logrm.addr; 1853 args[2].size = gsp->logrm.size; 1854 args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; 1855 args[2].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; 1856 create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size); 1857 1858 ret = r535_gsp_rmargs_init(gsp, false); 1859 if (ret) 1860 return ret; 1861 1862 args[3].id8 = r535_gsp_libos_id8("RMARGS"); 1863 args[3].pa = gsp->rmargs.addr; 1864 args[3].size = gsp->rmargs.size; 1865 args[3].kind = 
LIBOS_MEMORY_REGION_CONTIGUOUS; 1866 args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; 1867 return 0; 1868 } 1869 1870 void 1871 nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt) 1872 { 1873 struct scatterlist *sgl; 1874 int i; 1875 1876 dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); 1877 1878 for_each_sgtable_sg(sgt, sgl, i) { 1879 struct page *page = sg_page(sgl); 1880 1881 __free_page(page); 1882 } 1883 1884 sg_free_table(sgt); 1885 } 1886 1887 int 1888 nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt) 1889 { 1890 const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE); 1891 struct scatterlist *sgl; 1892 int ret, i; 1893 1894 ret = sg_alloc_table(sgt, pages, GFP_KERNEL); 1895 if (ret) 1896 return ret; 1897 1898 for_each_sgtable_sg(sgt, sgl, i) { 1899 struct page *page = alloc_page(GFP_KERNEL); 1900 1901 if (!page) { 1902 nvkm_gsp_sg_free(device, sgt); 1903 return -ENOMEM; 1904 } 1905 1906 sg_set_page(sgl, page, PAGE_SIZE, 0); 1907 } 1908 1909 ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); 1910 if (ret) 1911 nvkm_gsp_sg_free(device, sgt); 1912 1913 return ret; 1914 } 1915 1916 static void 1917 nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3) 1918 { 1919 for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) 1920 nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]); 1921 } 1922 1923 /** 1924 * nvkm_gsp_radix3_sg - build a radix3 table from a S/G list 1925 * 1926 * The GSP uses a three-level page table, called radix3, to map the firmware. 1927 * Each 64-bit "pointer" in the table is either the bus address of an entry in 1928 * the next table (for levels 0 and 1) or the bus address of the next page in 1929 * the GSP firmware image itself. 1930 * 1931 * Level 0 contains a single entry in one page that points to the first page 1932 * of level 1. 1933 * 1934 * Level 1, since it's also only one page in size, contains up to 512 entries, 1935 * one for each page in Level 2. 1936 * 1937 * Level 2 can be up to 512 pages in size, and each of those entries points to 1938 * the next page of the firmware image. Since there can be up to 512*512 1939 * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB. 1940 * 1941 * Internally, the GSP has its window into system memory, but the base 1942 * physical address of the aperture is not 0. In fact, it varies depending on 1943 * the GPU architecture. Since the GPU is a PCI device, this window is 1944 * accessed via DMA and is therefore bound by IOMMU translation. The end 1945 * result is that GSP-RM must translate the bus addresses in the table to GSP 1946 * physical addresses. All this should happen transparently. 
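 *
 * For example, with 4 KiB pages a 20 MiB firmware image spans 5120 pages:
 * level 2 needs 5120 entries (ten pages of pointers), level 1 needs ten
 * entries, and level 0 holds the single entry pointing at the level 1 page.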
static void
nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
{
	for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--)
		nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
}

/**
 * nvkm_gsp_radix3_sg - build a radix3 table from a S/G list
 *
 * The GSP uses a three-level page table, called radix3, to map the firmware.
 * Each 64-bit "pointer" in the table is either the bus address of an entry in
 * the next table (for levels 0 and 1) or the bus address of the next page in
 * the GSP firmware image itself.
 *
 * Level 0 contains a single entry in one page that points to the first page
 * of level 1.
 *
 * Level 1, since it's also only one page in size, contains up to 512 entries,
 * one for each page in Level 2.
 *
 * Level 2 can be up to 512 pages in size, and each of those entries points to
 * the next page of the firmware image. Since there can be up to 512*512
 * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB.
 *
 * Internally, the GSP has its own window into system memory, but the base
 * physical address of the aperture is not 0. In fact, it varies depending on
 * the GPU architecture. Since the GPU is a PCI device, this window is
 * accessed via DMA and is therefore bound by IOMMU translation. The end
 * result is that GSP-RM must translate the bus addresses in the table to GSP
 * physical addresses. All this should happen transparently.
 *
 * Returns 0 on success, or negative error code
 *
 * See kgspCreateRadix3_IMPL
 */
static int
nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
		   struct nvkm_gsp_radix3 *rx3)
{
	u64 addr;

	for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
		u64 *ptes;
		size_t bufsize;
		int ret, idx;

		bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
		ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]);
		if (ret)
			return ret;

		ptes = rx3->mem[i].data;
		if (i == 2) {
			struct scatterlist *sgl;

			for_each_sgtable_dma_sg(sgt, sgl, idx) {
				for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++)
					*ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j);
			}
		} else {
			for (int j = 0; j < size / GSP_PAGE_SIZE; j++)
				*ptes++ = addr + GSP_PAGE_SIZE * j;
		}

		size = rx3->mem[i].size;
		addr = rx3->mem[i].addr;
	}

	return 0;
}
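/*
 * Worked example (a sketch, not part of the driver; assumes GSP_PAGE_SIZE is
 * 4 KiB, so one page holds 512 64-bit entries): for a 36 MiB firmware image,
 *
 *	level 2: 36 MiB / 4 KiB = 9216 entries -> 18 pages of PTEs
 *	level 1: 18 entries (one per level-2 page) -> 1 page
 *	level 0: 1 entry pointing at the level-1 page
 *
 * which is also why the maximum image size is 512 * 512 * 4 KiB = 1 GiB, as
 * described in the comment above.
 */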
int
r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
{
	u32 mbox0 = 0xff, mbox1 = 0xff;
	int ret;

	if (!gsp->running)
		return 0;

	if (suspend) {
		GspFwWprMeta *meta = gsp->wpr_meta.data;
		u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
		GspFwSRMeta *sr;

		ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
		if (ret)
			return ret;

		ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3);
		if (ret)
			return ret;

		ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta);
		if (ret)
			return ret;

		sr = gsp->sr.meta.data;
		sr->magic = GSP_FW_SR_META_MAGIC;
		sr->revision = GSP_FW_SR_META_REVISION;
		sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr;
		sr->sizeOfSuspendResumeData = len;

		mbox0 = lower_32_bits(gsp->sr.meta.addr);
		mbox1 = upper_32_bits(gsp->sr.meta.addr);
	}

	ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
	if (WARN_ON(ret))
		return ret;

	nvkm_msec(gsp->subdev.device, 2000,
		if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000)
			break;
	);

	nvkm_falcon_reset(&gsp->falcon);

	ret = nvkm_gsp_fwsec_sb(gsp);
	WARN_ON(ret);

	ret = r535_gsp_booter_unload(gsp, mbox0, mbox1);
	WARN_ON(ret);

	gsp->running = false;
	return 0;
}

int
r535_gsp_init(struct nvkm_gsp *gsp)
{
	u32 mbox0, mbox1;
	int ret;

	if (!gsp->sr.meta.data) {
		mbox0 = lower_32_bits(gsp->wpr_meta.addr);
		mbox1 = upper_32_bits(gsp->wpr_meta.addr);
	} else {
		r535_gsp_rmargs_init(gsp, true);

		mbox0 = lower_32_bits(gsp->sr.meta.addr);
		mbox1 = upper_32_bits(gsp->sr.meta.addr);
	}

	/* Execute booter to handle (eventually...) booting GSP-RM. */
	ret = r535_gsp_booter_load(gsp, mbox0, mbox1);
	if (WARN_ON(ret))
		goto done;

	ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
	if (ret)
		goto done;

	gsp->running = true;

done:
	if (gsp->sr.meta.data) {
		nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta);
		nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
		nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
		return ret;
	}

	if (ret == 0)
		ret = r535_gsp_postinit(gsp);

	return ret;
}

static int
r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
{
	const struct firmware *fw = gsp->fws.bl;
	const struct nvfw_bin_hdr *hdr;
	RM_RISCV_UCODE_DESC *desc;
	int ret;

	hdr = nvfw_bin_hdr(&gsp->subdev, fw->data);
	desc = (void *)fw->data + hdr->header_offset;

	ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw);
	if (ret)
		return ret;

	memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size);

	gsp->boot.code_offset = desc->monitorCodeOffset;
	gsp->boot.data_offset = desc->monitorDataOffset;
	gsp->boot.manifest_offset = desc->manifestOffset;
	gsp->boot.app_version = desc->appVersion;
	return 0;
}

static const struct nvkm_firmware_func
r535_gsp_fw = {
	.type = NVKM_FIRMWARE_IMG_SGT,
};

static int
r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize)
{
	const u8 *img = gsp->fws.rm->data;
	const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img;
	const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff];
	const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset];

	for (int i = 0; i < ehdr->e_shnum; i++, shdr++) {
		if (!strcmp(&names[shdr->sh_name], name)) {
			*pdata = &img[shdr->sh_offset];
			*psize = shdr->sh_size;
			return 0;
		}
	}

	nvkm_error(&gsp->subdev, "section '%s' not found\n", name);
	return -ENOENT;
}

static void
r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
{
	nvkm_firmware_put(gsp->fws.bl);
	gsp->fws.bl = NULL;
	nvkm_firmware_put(gsp->fws.booter.unload);
	gsp->fws.booter.unload = NULL;
	nvkm_firmware_put(gsp->fws.booter.load);
	gsp->fws.booter.load = NULL;
	nvkm_firmware_put(gsp->fws.rm);
	gsp->fws.rm = NULL;
}

void
r535_gsp_dtor(struct nvkm_gsp *gsp)
{
	idr_destroy(&gsp->client_id.idr);
	mutex_destroy(&gsp->client_id.mutex);

	nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
	nvkm_gsp_mem_dtor(gsp, &gsp->sig);
	nvkm_firmware_dtor(&gsp->fw);

	nvkm_falcon_fw_dtor(&gsp->booter.unload);
	nvkm_falcon_fw_dtor(&gsp->booter.load);

	mutex_destroy(&gsp->msgq.mutex);
	mutex_destroy(&gsp->cmdq.mutex);

	r535_gsp_dtor_fws(gsp);

	nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
	nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
	nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
	nvkm_gsp_mem_dtor(gsp, &gsp->logrm);
}
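/*
 * Illustrative only (mirrors the calls in r535_gsp_oneinit() below): the
 * GSP-RM firmware file is an ELF image, and sections are pulled out of it by
 * name, e.g.
 *
 *	const u8 *data;
 *	u64 size;
 *
 *	ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
 *	if (ret)
 *		return ret;
 *
 * On success, "data" points into gsp->fws.rm->data, so the firmware must not
 * be released (r535_gsp_dtor_fws()) until the section contents have been
 * copied elsewhere.
 */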
"booter-unload", gsp->fws.booter.unload, 2189 &device->sec2->falcon, &gsp->booter.unload); 2190 if (ret) 2191 return ret; 2192 2193 /* Load GSP firmware from ELF image into DMA-accessible memory. */ 2194 ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size); 2195 if (ret) 2196 return ret; 2197 2198 ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw); 2199 if (ret) 2200 return ret; 2201 2202 /* Load relevant signature from ELF image. */ 2203 ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size); 2204 if (ret) 2205 return ret; 2206 2207 ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig); 2208 if (ret) 2209 return ret; 2210 2211 memcpy(gsp->sig.data, data, size); 2212 2213 /* Build radix3 page table for ELF image. */ 2214 ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3); 2215 if (ret) 2216 return ret; 2217 2218 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER, 2219 r535_gsp_msg_run_cpu_sequencer, gsp); 2220 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp); 2221 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED, 2222 r535_gsp_msg_rc_triggered, gsp); 2223 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED, 2224 r535_gsp_msg_mmu_fault_queued, gsp); 2225 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp); 2226 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL); 2227 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL); 2228 r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL); 2229 ret = r535_gsp_rm_boot_ctor(gsp); 2230 if (ret) 2231 return ret; 2232 2233 /* Release FW images - we've copied them to DMA buffers now. */ 2234 r535_gsp_dtor_fws(gsp); 2235 2236 /* Calculate FB layout. 
	/* Calculate FB layout. */
	gsp->fb.wpr2.frts.size = 0x100000;
	gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;

	gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
	gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);

	gsp->fb.wpr2.elf.size = gsp->fw.len;
	gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);

	{
		u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);

		gsp->fb.wpr2.heap.size =
			gsp->func->wpr_heap.os_carveout_size +
			gsp->func->wpr_heap.base_size +
			ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
			ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);

		gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
	}
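	/*
	 * Worked example (illustrative; the real values come from
	 * gsp->func->wpr_heap and the GSP_FW_HEAP_PARAM_* constants): on a
	 * hypothetical 24 GiB board, fb_size_gb is 24 and the heap is sized as
	 *
	 *	os_carveout_size + base_size
	 *	+ ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * 24, 1 MiB)
	 *	+ ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 MiB)
	 *
	 * then clamped up to wpr_heap.min_size, and finally re-aligned to
	 * 1 MiB against the bottom of the ELF region just below.
	 */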
	gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
	gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);

	gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
	gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;

	gsp->fb.heap.size = 0x100000;
	gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;

	ret = nvkm_gsp_fwsec_frts(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_libos_init(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_wpr_meta_init(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_rpc_set_system_info(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_rpc_set_registry(gsp);
	if (WARN_ON(ret))
		return ret;

	/* Reset GSP into RISC-V mode. */
	ret = gsp->func->reset(gsp);
	if (WARN_ON(ret))
		return ret;

	nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
	nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));

	mutex_init(&gsp->client_id.mutex);
	idr_init(&gsp->client_id.idr);
	return 0;
}

static int
r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
		 const struct firmware **pfw)
{
	char fwname[64];

	snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
	return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
}

int
r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	int ret;

	if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
		return -EINVAL;

	if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
	    (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
	    (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
	    (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
		r535_gsp_dtor_fws(gsp);
		return ret;
	}

	return 0;
}

#define NVKM_GSP_FIRMWARE(chip)                                                \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin");     \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin");   \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin");      \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")

NVKM_GSP_FIRMWARE(tu102);
NVKM_GSP_FIRMWARE(tu104);
NVKM_GSP_FIRMWARE(tu106);

NVKM_GSP_FIRMWARE(tu116);
NVKM_GSP_FIRMWARE(tu117);

NVKM_GSP_FIRMWARE(ga100);

NVKM_GSP_FIRMWARE(ga102);
NVKM_GSP_FIRMWARE(ga103);
NVKM_GSP_FIRMWARE(ga104);
NVKM_GSP_FIRMWARE(ga106);
NVKM_GSP_FIRMWARE(ga107);

NVKM_GSP_FIRMWARE(ad102);
NVKM_GSP_FIRMWARE(ad103);
NVKM_GSP_FIRMWARE(ad104);
NVKM_GSP_FIRMWARE(ad106);
NVKM_GSP_FIRMWARE(ad107);
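/*
 * Example (illustrative, assuming nvkm_firmware_get() prefixes the requested
 * name with "nvidia/<chipset>/" and appends ".bin"): on an AD102 board,
 * r535_gsp_load_fw(gsp, "gsp", "535.113.01", ...) would end up requesting
 *
 *	nvidia/ad102/gsp/gsp-535.113.01.bin
 *
 * which matches the name declared by NVKM_GSP_FIRMWARE(ad102) above.
 */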