/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/pci.h>
#include <subdev/timer.h>
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
#include <engine/sec2.h>

#include <nvfw/fw.h>

#include <nvrm/nvtypes.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>

#include <linux/acpi.h>

#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)

struct r535_gsp_msg {
	u8 auth_tag_buffer[16];
	u8 aad_buffer[16];
	u32 checksum;
	u32 sequence;
	u32 elem_count;
	u32 pad;
	u8 data[];
};

#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)

static int
r535_rpc_status_to_errno(uint32_t rpc_status)
{
	switch (rpc_status) {
	case 0x55: /* NV_ERR_NOT_READY */
	case 0x66: /* NV_ERR_TIMEOUT_RETRY */
		return -EAGAIN;
	case 0x51: /* NV_ERR_NO_MEMORY */
		return -ENOMEM;
	default:
		return -EINVAL;
	}
}
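/* Wait for GSP-RM to publish enough message-queue pages to hold
 * GSP_MSG_HDR_SIZE + repc bytes.  With prepc set this only peeks and returns
 * a pointer into the shared queue; otherwise the message is copied out
 * (handling ring wrap-around) and the read pointer is advanced.
 */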
static void *
r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
{
	struct r535_gsp_msg *mqe;
	u32 size, rptr = *gsp->msgq.rptr;
	int used;
	u8 *msg;
	u32 len;

	size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE);
	if (WARN_ON(!size || size >= gsp->msgq.cnt))
		return ERR_PTR(-EINVAL);

	do {
		u32 wptr = *gsp->msgq.wptr;

		used = wptr + gsp->msgq.cnt - rptr;
		if (used >= gsp->msgq.cnt)
			used -= gsp->msgq.cnt;
		if (used >= size)
			break;

		usleep_range(1, 2);
	} while (--(*ptime));

	if (WARN_ON(!*ptime))
		return ERR_PTR(-ETIMEDOUT);

	mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000);

	if (prepc) {
		*prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe);
		return mqe->data;
	}

	msg = kvmalloc(repc, GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
	len = min_t(u32, repc, len);
	memcpy(msg, mqe->data, len);

	rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE);
	if (rptr == gsp->msgq.cnt)
		rptr = 0;

	repc -= len;

	if (repc) {
		mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
		memcpy(msg + len, mqe, repc);

		rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE);
	}

	mb();
	(*gsp->msgq.rptr) = rptr;
	return msg;
}

static void *
r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime)
{
	return r535_gsp_msgq_wait(gsp, repc, NULL, ptime);
}

static int
r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
{
	struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data);
	struct r535_gsp_msg *cqe;
	u32 argc = cmd->checksum;
	u64 *ptr = (void *)cmd;
	u64 *end;
	u64 csum = 0;
	int free, time = 1000000;
	u32 wptr, size;
	u32 off = 0;

	argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);

	end = (u64 *)((char *)ptr + argc);
	cmd->pad = 0;
	cmd->checksum = 0;
	cmd->sequence = gsp->cmdq.seq++;
	cmd->elem_count = DIV_ROUND_UP(argc, 0x1000);

	while (ptr < end)
		csum ^= *ptr++;

	cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);

	wptr = *gsp->cmdq.wptr;
	do {
		do {
			free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
			if (free >= gsp->cmdq.cnt)
				free -= gsp->cmdq.cnt;
			if (free >= 1)
				break;

			usleep_range(1, 2);
		} while(--time);

		if (WARN_ON(!time)) {
			kvfree(cmd);
			return -ETIMEDOUT;
		}

		cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
		size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE);
		memcpy(cqe, (u8 *)cmd + off, size);

		wptr += DIV_ROUND_UP(size, 0x1000);
		if (wptr == gsp->cmdq.cnt)
			wptr = 0;

		off += size;
		argc -= size;
	} while(argc);

	nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
	wmb();
	(*gsp->cmdq.wptr) = wptr;
	mb();

	nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);

	kvfree(cmd);
	return 0;
}

static void *
r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc)
{
	struct r535_gsp_msg *cmd;
	u32 size = GSP_MSG_HDR_SIZE + argc;

	size = ALIGN(size, GSP_MSG_MIN_SIZE);
	cmd = kvzalloc(size, GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	cmd->checksum = argc;
	return cmd->data;
}
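/* RPC header that GSP-RM prepends to every message payload; 'length' covers
 * the header itself plus the function-specific data that follows in data[].
 */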
struct nvfw_gsp_rpc {
	u32 header_version;
	u32 signature;
	u32 length;
	u32 function;
	u32 rpc_result;
	u32 rpc_result_private;
	u32 sequence;
	union {
		u32 spare;
		u32 cpuRmGfid;
	};
	u8 data[];
};

static void
r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
{
	kvfree(msg);
}

static void
r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
{
	if (gsp->subdev.debug >= lvl) {
		nvkm_printk__(&gsp->subdev, lvl, info,
			      "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
			      msg->function, msg->length, msg->length - sizeof(*msg),
			      msg->rpc_result, msg->rpc_result_private);
		print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
			       msg->data, msg->length - sizeof(*msg), true);
	}
}

static struct nvfw_gsp_rpc *
r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	struct nvfw_gsp_rpc *msg;
	int time = 4000000, i;
	u32 size;

retry:
	msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time);
	if (IS_ERR_OR_NULL(msg))
		return msg;

	msg = r535_gsp_msgq_recv(gsp, msg->length, &time);
	if (IS_ERR_OR_NULL(msg))
		return msg;

	if (msg->rpc_result) {
		r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
		r535_gsp_msg_done(gsp, msg);
		return ERR_PTR(-EINVAL);
	}

	r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE);

	if (fn && msg->function == fn) {
		if (repc) {
			if (msg->length < sizeof(*msg) + repc) {
				nvkm_error(subdev, "msg len %d < %zd\n",
					   msg->length, sizeof(*msg) + repc);
				r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
				r535_gsp_msg_done(gsp, msg);
				return ERR_PTR(-EIO);
			}

			return msg;
		}

		r535_gsp_msg_done(gsp, msg);
		return NULL;
	}

	for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
		struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];

		if (ntfy->fn == msg->function) {
			if (ntfy->func)
				ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
			break;
		}
	}

	if (i == gsp->msgq.ntfy_nr)
		r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN);

	r535_gsp_msg_done(gsp, msg);
	if (fn)
		goto retry;

	if (*gsp->msgq.rptr != *gsp->msgq.wptr)
		goto retry;

	return NULL;
}

static int
r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
{
	int ret = 0;

	mutex_lock(&gsp->msgq.mutex);
	if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
		ret = -ENOSPC;
	} else {
		gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
		gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
		gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
		gsp->msgq.ntfy_nr++;
	}
	mutex_unlock(&gsp->msgq.mutex);
	return ret;
}

static int
r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
{
	void *repv;

	mutex_lock(&gsp->cmdq.mutex);
	repv = r535_gsp_msg_recv(gsp, fn, 0);
	mutex_unlock(&gsp->cmdq.mutex);
	if (IS_ERR(repv))
		return PTR_ERR(repv);

	return 0;
}
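/* Write a fully-built RPC into the command queue and notify GSP-RM; when
 * 'wait' is set, block for a reply whose function number matches and return
 * a pointer to its payload, which the caller releases via nvkm_gsp_rpc_done().
 */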
static void *
r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
{
	struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
	struct nvfw_gsp_rpc *msg;
	u32 fn = rpc->function;
	void *repv = NULL;
	int ret;

	if (gsp->subdev.debug >= NV_DBG_TRACE) {
		nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
			   rpc->length, rpc->length - sizeof(*rpc));
		print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
			       rpc->data, rpc->length - sizeof(*rpc), true);
	}

	ret = r535_gsp_cmdq_push(gsp, rpc);
	if (ret)
		return ERR_PTR(ret);

	if (wait) {
		msg = r535_gsp_msg_recv(gsp, fn, repc);
		if (!IS_ERR_OR_NULL(msg))
			repv = msg->data;
		else
			repv = msg;
	}

	return repv;
}

static void
r535_gsp_event_dtor(struct nvkm_gsp_event *event)
{
	struct nvkm_gsp_device *device = event->device;
	struct nvkm_gsp_client *client = device->object.client;
	struct nvkm_gsp *gsp = client->gsp;

	mutex_lock(&gsp->client_id.mutex);
	if (event->func) {
		list_del(&event->head);
		event->func = NULL;
	}
	mutex_unlock(&gsp->client_id.mutex);

	nvkm_gsp_rm_free(&event->object);
	event->device = NULL;
}

static int
r535_gsp_device_event_get(struct nvkm_gsp_event *event)
{
	struct nvkm_gsp_device *device = event->device;
	NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;

	ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
				    NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	ctrl->event = event->id;
	ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
	return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
}

static int
r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
			   nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
{
	struct nvkm_gsp_client *client = device->object.client;
	struct nvkm_gsp *gsp = client->gsp;
	NV0005_ALLOC_PARAMETERS *args;
	int ret;

	args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
				     NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
				     &event->object);
	if (IS_ERR(args))
		return PTR_ERR(args);

	args->hParentClient = client->object.handle;
	args->hSrcResource = 0;
	args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
	args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
	args->data = NULL;

	ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
	if (ret)
		return ret;

	event->device = device;
	event->id = id;

	ret = r535_gsp_device_event_get(event);
	if (ret) {
		nvkm_gsp_event_dtor(event);
		return ret;
	}

	mutex_lock(&gsp->client_id.mutex);
	event->func = func;
	list_add(&event->head, &client->events);
	mutex_unlock(&gsp->client_id.mutex);
	return 0;
}

static void
r535_gsp_device_dtor(struct nvkm_gsp_device *device)
{
	nvkm_gsp_rm_free(&device->subdevice);
	nvkm_gsp_rm_free(&device->object);
}

static int
r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
{
	NV2080_ALLOC_PARAMETERS *args;

	return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args),
				 &device->subdevice);
}

static int
r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
{
	NV0080_ALLOC_PARAMETERS *args;
	int ret;

	args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args),
				     &device->object);
	if (IS_ERR(args))
		return PTR_ERR(args);

	args->hClientShare = client->object.handle;

	ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
	if (ret)
		return ret;

	ret = r535_gsp_subdevice_ctor(device);
	if (ret)
		nvkm_gsp_rm_free(&device->object);

	return ret;
}
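/* GSP-RM client handles are built as 0xc1d00000 | <idr id>; the low 16 bits
 * are used to look the client back up (e.g. when dispatching POST_EVENT
 * messages), so the dtor removes the idr entry by handle & 0xffff.
 */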
static void
r535_gsp_client_dtor(struct nvkm_gsp_client *client)
{
	struct nvkm_gsp *gsp = client->gsp;

	nvkm_gsp_rm_free(&client->object);

	mutex_lock(&gsp->client_id.mutex);
	idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff);
	mutex_unlock(&gsp->client_id.mutex);

	client->gsp = NULL;
}

static int
r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
{
	NV0000_ALLOC_PARAMETERS *args;
	int ret;

	mutex_lock(&gsp->client_id.mutex);
	ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
	mutex_unlock(&gsp->client_id.mutex);
	if (ret < 0)
		return ret;

	client->gsp = gsp;
	client->object.client = client;
	INIT_LIST_HEAD(&client->events);

	args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
				     &client->object);
	if (IS_ERR(args)) {
		r535_gsp_client_dtor(client);
		return PTR_ERR(args);
	}

	args->hClient = client->object.handle;
	args->processID = ~0;

	ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
	if (ret) {
		r535_gsp_client_dtor(client);
		return ret;
	}

	return 0;
}

static int
r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
{
	struct nvkm_gsp_client *client = object->client;
	struct nvkm_gsp *gsp = client->gsp;
	rpc_free_v03_00 *rpc;

	nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
		   client->object.handle, object->handle);

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
	if (WARN_ON(IS_ERR_OR_NULL(rpc)))
		return -EIO;

	rpc->params.hRoot = client->object.handle;
	rpc->params.hObjectParent = 0;
	rpc->params.hObjectOld = object->handle;
	return nvkm_gsp_rpc_wr(gsp, rpc, true);
}

static void
r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
{
	rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params);

	nvkm_gsp_rpc_done(object->client->gsp, rpc);
}
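/* Push a GSP_RM_ALLOC RPC.  On success with a non-zero repc the returned
 * pointer references the reply's params, and the caller must release it via
 * the rm_alloc_done() hook; otherwise the reply is freed here.
 */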
static void *
r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
	rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
	struct nvkm_gsp *gsp = object->client->gsp;
	void *ret;

	rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	if (rpc->status) {
		ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
		if (PTR_ERR(ret) != -EAGAIN)
			nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
	} else {
		ret = repc ? rpc->params : NULL;
	}

	if (IS_ERR_OR_NULL(ret))
		nvkm_gsp_rpc_done(gsp, rpc);

	return ret;
}

static void *
r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc)
{
	struct nvkm_gsp_client *client = object->client;
	struct nvkm_gsp *gsp = client->gsp;
	rpc_gsp_rm_alloc_v03_00 *rpc;

	nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n",
		   client->object.handle, object->parent->handle, object->handle, oclass, argc);

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc);
	if (IS_ERR(rpc))
		return rpc;

	rpc->hClient = client->object.handle;
	rpc->hParent = object->parent->handle;
	rpc->hObject = object->handle;
	rpc->hClass = oclass;
	rpc->status = 0;
	rpc->paramsSize = argc;
	return rpc->params;
}

static void
r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
{
	rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params);

	if (!repv)
		return;
	nvkm_gsp_rpc_done(object->client->gsp, rpc);
}

static int
r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
{
	rpc_gsp_rm_control_v03_00 *rpc = container_of((*argv), typeof(*rpc), params);
	struct nvkm_gsp *gsp = object->client->gsp;
	int ret = 0;

	rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
	if (IS_ERR_OR_NULL(rpc)) {
		*argv = NULL;
		return PTR_ERR(rpc);
	}

	if (rpc->status) {
		ret = r535_rpc_status_to_errno(rpc->status);
		if (ret != -EAGAIN)
			nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
				   object->client->object.handle, object->handle, rpc->cmd, rpc->status);
	}

	if (repc)
		*argv = rpc->params;
	else
		nvkm_gsp_rpc_done(gsp, rpc);

	return ret;
}

static void *
r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
{
	struct nvkm_gsp_client *client = object->client;
	struct nvkm_gsp *gsp = client->gsp;
	rpc_gsp_rm_control_v03_00 *rpc;

	nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n",
		   client->object.handle, object->handle, cmd, argc);

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc);
	if (IS_ERR(rpc))
		return rpc;

	rpc->hClient = client->object.handle;
	rpc->hObject = object->handle;
	rpc->cmd = cmd;
	rpc->status = 0;
	rpc->paramsSize = argc;
	return rpc->params;
}

static void
r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
{
	struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);

	r535_gsp_msg_done(gsp, rpc);
}

static void *
r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
{
	struct nvfw_gsp_rpc *rpc;

	rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64)));
	if (IS_ERR(rpc))
		return ERR_CAST(rpc);

	rpc->header_version = 0x03000000;
	rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
	rpc->function = fn;
	rpc->rpc_result = 0xffffffff;
	rpc->rpc_result_private = 0xffffffff;
	rpc->length = sizeof(*rpc) + argc;
	return rpc->data;
}
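/* A single queue element holds at most 16 pages minus the r535_gsp_msg
 * header, so larger RPCs are split here and the remainder is streamed as
 * NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD RPCs before waiting for the reply.
 */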
static void *
r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
{
	struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
	struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), data);
	const u32 max_msg_size = (16 * 0x1000) - sizeof(struct r535_gsp_msg);
	const u32 max_rpc_size = max_msg_size - sizeof(*rpc);
	u32 rpc_size = rpc->length - sizeof(*rpc);
	void *repv;

	mutex_lock(&gsp->cmdq.mutex);
	if (rpc_size > max_rpc_size) {
		const u32 fn = rpc->function;

		/* Adjust length, and send initial RPC. */
		rpc->length = sizeof(*rpc) + max_rpc_size;
		cmd->checksum = rpc->length;

		repv = r535_gsp_rpc_send(gsp, argv, false, 0);
		if (IS_ERR(repv))
			goto done;

		argv += max_rpc_size;
		rpc_size -= max_rpc_size;

		/* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
		while (rpc_size) {
			u32 size = min(rpc_size, max_rpc_size);
			void *next;

			next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
			if (IS_ERR(next)) {
				repv = next;
				goto done;
			}

			memcpy(next, argv, size);

			repv = r535_gsp_rpc_send(gsp, next, false, 0);
			if (IS_ERR(repv))
				goto done;

			argv += size;
			rpc_size -= size;
		}

		/* Wait for reply. */
		if (wait) {
			rpc = r535_gsp_msg_recv(gsp, fn, repc);
			if (!IS_ERR_OR_NULL(rpc))
				repv = rpc->data;
			else
				repv = rpc;
		} else {
			repv = NULL;
		}
	} else {
		repv = r535_gsp_rpc_send(gsp, argv, wait, repc);
	}

done:
	mutex_unlock(&gsp->cmdq.mutex);
	return repv;
}

const struct nvkm_gsp_rm
r535_gsp_rm = {
	.rpc_get = r535_gsp_rpc_get,
	.rpc_push = r535_gsp_rpc_push,
	.rpc_done = r535_gsp_rpc_done,

	.rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get,
	.rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push,
	.rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done,

	.rm_alloc_get = r535_gsp_rpc_rm_alloc_get,
	.rm_alloc_push = r535_gsp_rpc_rm_alloc_push,
	.rm_alloc_done = r535_gsp_rpc_rm_alloc_done,

	.rm_free = r535_gsp_rpc_rm_free,

	.client_ctor = r535_gsp_client_ctor,
	.client_dtor = r535_gsp_client_dtor,

	.device_ctor = r535_gsp_device_ctor,
	.device_dtor = r535_gsp_device_dtor,

	.event_ctor = r535_gsp_device_event_ctor,
	.event_dtor = r535_gsp_event_dtor,
};

static void
r535_gsp_msgq_work(struct work_struct *work)
{
	struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work);

	mutex_lock(&gsp->cmdq.mutex);
	if (*gsp->msgq.rptr != *gsp->msgq.wptr)
		r535_gsp_msg_recv(gsp, 0, 0);
	mutex_unlock(&gsp->cmdq.mutex);
}

static irqreturn_t
r535_gsp_intr(struct nvkm_inth *inth)
{
	struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth);
	struct nvkm_subdev *subdev = &gsp->subdev;
	u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008);
	u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 +
						  gsp->falcon.func->riscv_irqmask);
	u32 stat = intr & inte;

	if (!stat) {
		nvkm_debug(subdev, "inte %08x %08x\n", intr, inte);
		return IRQ_NONE;
	}

	if (stat & 0x00000040) {
		nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040);
		schedule_work(&gsp->msgq.work);
		stat &= ~0x00000040;
	}

	if (stat) {
		nvkm_error(subdev, "intr %08x\n", stat);
		nvkm_falcon_wr32(&gsp->falcon, 0x014, stat);
		nvkm_falcon_wr32(&gsp->falcon, 0x004, stat);
	}

	nvkm_falcon_intr_retrigger(&gsp->falcon);
	return IRQ_HANDLED;
}
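/* Query GSP-RM for its interrupt table and record the stall/non-stall
 * vectors for the engines nvkm knows how to drive.
 */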
static int
r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
{
	NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
	int ret = 0;

	ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
				    NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
	if (WARN_ON(ret)) {
		nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
		return ret;
	}

	for (unsigned i = 0; i < ctrl->tableLen; i++) {
		enum nvkm_subdev_type type;
		int inst;

		nvkm_debug(&gsp->subdev,
			   "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i,
			   ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
			   ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);

		switch (ctrl->table[i].engineIdx) {
		case MC_ENGINE_IDX_GSP:
			type = NVKM_SUBDEV_GSP;
			inst = 0;
			break;
		case MC_ENGINE_IDX_DISP:
			type = NVKM_ENGINE_DISP;
			inst = 0;
			break;
		case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
			type = NVKM_ENGINE_CE;
			inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
			break;
		case MC_ENGINE_IDX_GR0:
			type = NVKM_ENGINE_GR;
			inst = 0;
			break;
		case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
			type = NVKM_ENGINE_NVDEC;
			inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
			break;
		case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
			type = NVKM_ENGINE_NVENC;
			inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
			break;
		case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
			type = NVKM_ENGINE_NVJPG;
			inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
			break;
		case MC_ENGINE_IDX_OFA0:
			type = NVKM_ENGINE_OFA;
			inst = 0;
			break;
		default:
			continue;
		}

		if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
			ret = -ENOSPC;
			break;
		}

		gsp->intr[gsp->intr_nr].type = type;
		gsp->intr[gsp->intr_nr].inst = inst;
		gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall;
		gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall;
		gsp->intr_nr++;
	}

	nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
	return ret;
}
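/* Fetch GSP-RM's static configuration: the internal client/device/subdevice
 * handles used for further RM control calls, the BAR PDBs, the usable FB
 * regions, and the GPC/TPC topology.
 */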
static int
r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
{
	GspStaticConfigInfo *rpc;
	int last_usable = -1;

	rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
	if (IS_ERR(rpc))
		return PTR_ERR(rpc);

	gsp->internal.client.object.client = &gsp->internal.client;
	gsp->internal.client.object.parent = NULL;
	gsp->internal.client.object.handle = rpc->hInternalClient;
	gsp->internal.client.gsp = gsp;

	gsp->internal.device.object.client = &gsp->internal.client;
	gsp->internal.device.object.parent = &gsp->internal.client.object;
	gsp->internal.device.object.handle = rpc->hInternalDevice;

	gsp->internal.device.subdevice.client = &gsp->internal.client;
	gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
	gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;

	gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
	gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;

	for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
		NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
			&rpc->fbRegionInfoParams.fbRegion[i];

		nvkm_debug(&gsp->subdev, "fb region %d: "
			   "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
			   reg->base, reg->limit, reg->reserved, reg->performance,
			   reg->supportCompressed, reg->supportISO, reg->bProtected);

		if (!reg->reserved && !reg->bProtected) {
			if (reg->supportCompressed && reg->supportISO &&
			    !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) {
				const u64 size = (reg->limit + 1) - reg->base;

				gsp->fb.region[gsp->fb.region_nr].addr = reg->base;
				gsp->fb.region[gsp->fb.region_nr].size = size;
				gsp->fb.region_nr++;
			}

			last_usable = i;
		}
	}

	if (last_usable >= 0) {
		u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;

		gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;
	}

	for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
		if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
			gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask);
			gsp->gr.gpcs++;
		}
	}

	nvkm_gsp_rpc_done(gsp, rpc);
	return 0;
}

static void
nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
{
	if (mem->data) {
		/*
		 * Poison the buffer to catch any unexpected access from
		 * GSP-RM if the buffer was prematurely freed.
		 */
		memset(mem->data, 0xFF, mem->size);

		dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
		memset(mem, 0, sizeof(*mem));
	}
}

static int
nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
{
	mem->size = size;
	mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
	if (WARN_ON(!mem->data))
		return -ENOMEM;

	return 0;
}

static int
r535_gsp_postinit(struct nvkm_gsp *gsp)
{
	struct nvkm_device *device = gsp->subdev.device;
	int ret;

	ret = r535_gsp_rpc_get_gsp_static_info(gsp);
	if (WARN_ON(ret))
		return ret;

	INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work);

	ret = r535_gsp_intr_get_table(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst);
	if (WARN_ON(ret < 0))
		return ret;

	ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev,
			    r535_gsp_intr, &gsp->subdev.inth);
	if (WARN_ON(ret))
		return ret;

	nvkm_inth_allow(&gsp->subdev.inth);
	nvkm_wr32(device, 0x110004, 0x00000040);

	/* Release the DMA buffers that were needed only for boot and init */
	nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
	nvkm_gsp_mem_dtor(gsp, &gsp->libos);

	return ret;
}

static int
r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
{
	rpc_unloading_guest_driver_v1F_07 *rpc;

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc));
	if (IS_ERR(rpc))
		return PTR_ERR(rpc);

	if (suspend) {
		rpc->bInPMTransition = 1;
		rpc->bGc6Entering = 0;
		rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
	} else {
		rpc->bInPMTransition = 0;
		rpc->bGc6Entering = 0;
		rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
	}

	return nvkm_gsp_rpc_wr(gsp, rpc, true);
}

/* dword only */
struct nv_gsp_registry_entries {
	const char *name;
	u32 value;
};

static const struct nv_gsp_registry_entries r535_registry_entries[] = {
	{ "RMSecBusResetEnable", 1 },
	{ "RMForcePcieConfigSave", 1 },
};
#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries)
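/* Send the registry to GSP-RM as a PACKED_REGISTRY_TABLE: a fixed array of
 * entries followed by the NUL-terminated key strings, with each entry's
 * nameOffset pointing at its string relative to the start of the table.
 */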
static int
r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
{
	PACKED_REGISTRY_TABLE *rpc;
	char *strings;
	int str_offset;
	int i;
	size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES);

	/* add strings + null terminator */
	for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++)
		rpc_size += strlen(r535_registry_entries[i].name) + 1;

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, rpc_size);
	if (IS_ERR(rpc))
		return PTR_ERR(rpc);

	rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;

	str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
	strings = (char *)rpc + str_offset;
	for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
		int name_len = strlen(r535_registry_entries[i].name) + 1;

		rpc->entries[i].nameOffset = str_offset;
		rpc->entries[i].type = 1;
		rpc->entries[i].data = r535_registry_entries[i].value;
		rpc->entries[i].length = 4;
		memcpy(strings, r535_registry_entries[i].name, name_len);
		strings += name_len;
		str_offset += name_len;
	}
	rpc->size = str_offset;

	return nvkm_gsp_rpc_wr(gsp, rpc, false);
}

#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
static void
r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
{
	const guid_t NVOP_DSM_GUID =
		GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
			  0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
	u64 NVOP_DSM_REV = 0x00000100;
	union acpi_object argv4 = {
		.buffer.type = ACPI_TYPE_BUFFER,
		.buffer.length = 4,
		.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
	}, *obj;

	caps->status = 0xffff;

	if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a)))
		return;

	obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4);
	if (!obj)
		return;

	if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
	    WARN_ON(obj->buffer.length != 4))
		return;

	caps->status = 0;
	caps->optimusCaps = *(u32 *)obj->buffer.pointer;

	ACPI_FREE(obj);

	kfree(argv4.buffer.pointer);
}

static void
r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
{
	const guid_t JT_DSM_GUID =
		GUID_INIT(0xCBECA351L, 0x067B, 0x4924,
			  0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34);
	u64 JT_DSM_REV = 0x00000103;
	u32 caps;
	union acpi_object argv4 = {
		.buffer.type = ACPI_TYPE_BUFFER,
		.buffer.length = sizeof(caps),
		.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
	}, *obj;

	jt->status = 0xffff;

	obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4);
	if (!obj)
		return;

	if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
	    WARN_ON(obj->buffer.length != 4))
		return;

	jt->status = 0;
	jt->jtCaps = *(u32 *)obj->buffer.pointer;
	jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;
	jt->bSBIOSCaps = 0;

	ACPI_FREE(obj);

	kfree(argv4.buffer.pointer);
}
static void
r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
		     MUX_METHOD_DATA_ELEMENT *part)
{
	union acpi_object mux_arg = { ACPI_TYPE_INTEGER };
	struct acpi_object_list input = { 1, &mux_arg };
	acpi_handle iter = NULL, handle_mux = NULL;
	acpi_status status;
	unsigned long long value;

	mode->status = 0xffff;
	part->status = 0xffff;

	do {
		status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter);
		if (ACPI_FAILURE(status) || !iter)
			return;

		status = acpi_evaluate_integer(iter, "_ADR", NULL, &value);
		if (ACPI_FAILURE(status) || value != id)
			continue;

		handle_mux = iter;
	} while (!handle_mux);

	if (!handle_mux)
		return;

	/* I -think- 0 means "acquire" according to nvidia's driver source */
	input.pointer->integer.type = ACPI_TYPE_INTEGER;
	input.pointer->integer.value = 0;

	status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value);
	if (ACPI_SUCCESS(status)) {
		mode->acpiId = id;
		mode->mode = value;
		mode->status = 0;
	}

	status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value);
	if (ACPI_SUCCESS(status)) {
		part->acpiId = id;
		part->mode = value;
		part->status = 0;
	}
}

static void
r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux)
{
	mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]);

	for (int i = 0; i < mux->tableLen; i++) {
		r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i],
				     &mux->acpiIdMuxPartTable[i]);
	}
}

static void
r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
{
	acpi_status status;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *_DOD;

	dod->status = 0xffff;

	status = acpi_evaluate_object(handle, "_DOD", NULL, &output);
	if (ACPI_FAILURE(status))
		return;

	_DOD = output.pointer;

	if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) ||
	    WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList)))
		return;

	for (int i = 0; i < _DOD->package.count; i++) {
		if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER))
			return;

		dod->acpiIdList[i] = _DOD->package.elements[i].integer.value;
		dod->acpiIdListLen += sizeof(dod->acpiIdList[0]);
	}

	dod->status = 0;
	kfree(output.pointer);
}
#endif

static void
r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
{
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
	acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);

	if (!handle)
		return;

	acpi->bValid = 1;

	r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
	if (acpi->dodMethodData.status == 0)
		r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData);

	r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
	r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
#endif
}
static int
r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
{
	struct nvkm_device *device = gsp->subdev.device;
	struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
	GspSystemInfo *info;

	if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
		return -ENOSYS;

	info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->gpuPhysAddr = device->func->resource_addr(device, 0);
	info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
	info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
	info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
	info->maxUserVa = TASK_SIZE;
	info->pciConfigMirrorBase = 0x088000;
	info->pciConfigMirrorSize = 0x001000;
	r535_gsp_acpi_info(gsp, &info->acpiMethodData);

	return nvkm_gsp_rpc_wr(gsp, info, false);
}

static int
r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
{
	struct nvkm_gsp *gsp = priv;
	struct nvkm_subdev *subdev = &gsp->subdev;
	rpc_os_error_log_v17_00 *msg = repv;

	if (WARN_ON(repc < sizeof(*msg)))
		return -EINVAL;

	nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString);
	return 0;
}

static int
r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
{
	rpc_rc_triggered_v17_02 *msg = repv;
	struct nvkm_gsp *gsp = priv;
	struct nvkm_subdev *subdev = &gsp->subdev;
	struct nvkm_chan *chan;
	unsigned long flags;

	if (WARN_ON(repc < sizeof(*msg)))
		return -EINVAL;

	nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
		   msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
		   msg->partitionAttributionId);

	chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags);
	if (!chan) {
		nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);
		return 0;
	}

	nvkm_chan_error(chan, false);
	nvkm_chan_put(&chan, flags);
	return 0;
}

static int
r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
{
	struct nvkm_gsp *gsp = priv;
	struct nvkm_subdev *subdev = &gsp->subdev;

	WARN_ON(repc != 0);

	nvkm_error(subdev, "mmu fault queued\n");
	return 0;
}

static int
r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
{
	struct nvkm_gsp *gsp = priv;
	struct nvkm_gsp_client *client;
	struct nvkm_subdev *subdev = &gsp->subdev;
	rpc_post_event_v17_00 *msg = repv;

	if (WARN_ON(repc < sizeof(*msg)))
		return -EINVAL;
	if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize))
		return -EINVAL;

	nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n",
		   msg->hClient, msg->hEvent, msg->notifyIndex, msg->data,
		   msg->status, msg->eventDataSize, msg->bNotifyList);

	mutex_lock(&gsp->client_id.mutex);
	client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff);
	if (client) {
		struct nvkm_gsp_event *event;
		bool handled = false;

		list_for_each_entry(event, &client->events, head) {
			if (event->object.handle == msg->hEvent) {
				event->func(event, msg->eventData, msg->eventDataSize);
				handled = true;
			}
		}

		if (!handled) {
			nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n",
				   msg->hClient, msg->hEvent);
		}
	} else {
		nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient);
	}
	mutex_unlock(&gsp->client_id.mutex);
	return 0;
}

/**
 * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP
 * @priv: gsp pointer
 * @fn: function number (ignored)
 * @repv: pointer to the run_cpu_sequencer RPC
 * @repc: message size
 *
 * The GSP sequencer is a list of I/O commands that the GSP can send to
 * the driver to perform for various purposes.  The most common usage is to
 * perform a special mid-initialization reset.
 */
static int
r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
{
	struct nvkm_gsp *gsp = priv;
	struct nvkm_subdev *subdev = &gsp->subdev;
	struct nvkm_device *device = subdev->device;
	rpc_run_cpu_sequencer_v17_00 *seq = repv;
	int ptr = 0, ret;

	nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex);

	while (ptr < seq->cmdIndex) {
		GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr];

		ptr += 1;
		ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode);

		switch (cmd->opCode) {
		case GSP_SEQ_BUF_OPCODE_REG_WRITE: {
			u32 addr = cmd->payload.regWrite.addr;
			u32 data = cmd->payload.regWrite.val;

			nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data);
			nvkm_wr32(device, addr, data);
		}
			break;
		case GSP_SEQ_BUF_OPCODE_REG_MODIFY: {
			u32 addr = cmd->payload.regModify.addr;
			u32 mask = cmd->payload.regModify.mask;
			u32 data = cmd->payload.regModify.val;

			nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data);
			nvkm_mask(device, addr, mask, data);
		}
			break;
		case GSP_SEQ_BUF_OPCODE_REG_POLL: {
			u32 addr = cmd->payload.regPoll.addr;
			u32 mask = cmd->payload.regPoll.mask;
			u32 data = cmd->payload.regPoll.val;
			u32 usec = cmd->payload.regPoll.timeout ?: 4000000;
			//u32 error = cmd->payload.regPoll.error;

			nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec);
			nvkm_rd32(device, addr);
			nvkm_usec(device, usec,
				if ((nvkm_rd32(device, addr) & mask) == data)
					break;
			);
		}
			break;
		case GSP_SEQ_BUF_OPCODE_DELAY_US: {
			u32 usec = cmd->payload.delayUs.val;

			nvkm_trace(subdev, "seq usec %d\n", usec);
			udelay(usec);
		}
			break;
		case GSP_SEQ_BUF_OPCODE_REG_STORE: {
			u32 addr = cmd->payload.regStore.addr;
			u32 slot = cmd->payload.regStore.index;

			seq->regSaveArea[slot] = nvkm_rd32(device, addr);
			nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot,
				   seq->regSaveArea[slot]);
		}
			break;
		case GSP_SEQ_BUF_OPCODE_CORE_RESET:
			nvkm_trace(subdev, "seq core reset\n");
			nvkm_falcon_reset(&gsp->falcon);
			nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080);
			nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000);
			break;
		case GSP_SEQ_BUF_OPCODE_CORE_START:
			nvkm_trace(subdev, "seq core start\n");
			if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040)
				nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002);
			else
				nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002);
			break;
		case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT:
			nvkm_trace(subdev, "seq core wait halt\n");
			nvkm_msec(device, 2000,
				if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010)
					break;
			);
			break;
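		/*
		 * Resume GSP-RM: reset the GSP falcon, point it at the libos
		 * arguments, let SEC2 run the resume ucode, then check the
		 * SEC2 mailbox before confirming the RISC-V core is active.
		 */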
		case GSP_SEQ_BUF_OPCODE_CORE_RESUME: {
			struct nvkm_sec2 *sec2 = device->sec2;
			u32 mbox0;

			nvkm_trace(subdev, "seq core resume\n");

			ret = gsp->func->reset(gsp);
			if (WARN_ON(ret))
				return ret;

			nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
			nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));

			nvkm_falcon_start(&sec2->falcon);

			if (nvkm_msec(device, 2000,
				if (nvkm_rd32(device, 0x1180f8) & 0x04000000)
					break;
			) < 0)
				return -ETIMEDOUT;

			mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040);
			if (WARN_ON(mbox0)) {
				nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0);
				return -EIO;
			}

			nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);

			if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
				return -EIO;
		}
			break;
		default:
			nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode);
			return -EINVAL;
		}
	}

	return 0;
}

static int
r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	struct nvkm_device *device = subdev->device;
	u32 wpr2_hi;
	int ret;

	wpr2_hi = nvkm_rd32(device, 0x1fa828);
	if (!wpr2_hi) {
		nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
		return 0;
	}

	ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
	if (WARN_ON(ret))
		return ret;

	wpr2_hi = nvkm_rd32(device, 0x1fa828);
	if (WARN_ON(wpr2_hi))
		return -EIO;

	return 0;
}

static int
r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
{
	int ret;

	ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
	if (ret)
		return ret;

	nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);

	if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
		return -EIO;

	return 0;
}

static int
r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
{
	GspFwWprMeta *meta;
	int ret;

	ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);
	if (ret)
		return ret;

	meta = gsp->wpr_meta.data;

	meta->magic = GSP_FW_WPR_META_MAGIC;
	meta->revision = GSP_FW_WPR_META_REVISION;

	meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
	meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;

	meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
	meta->sizeOfBootloader = gsp->boot.fw.size;
	meta->bootloaderCodeOffset = gsp->boot.code_offset;
	meta->bootloaderDataOffset = gsp->boot.data_offset;
	meta->bootloaderManifestOffset = gsp->boot.manifest_offset;

	meta->sysmemAddrOfSignature = gsp->sig.addr;
	meta->sizeOfSignature = gsp->sig.size;

	meta->gspFwRsvdStart = gsp->fb.heap.addr;
	meta->nonWprHeapOffset = gsp->fb.heap.addr;
	meta->nonWprHeapSize = gsp->fb.heap.size;
	meta->gspFwWprStart = gsp->fb.wpr2.addr;
	meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
	meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
	meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
	meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
	meta->frtsOffset = gsp->fb.wpr2.frts.addr;
	meta->frtsSize = gsp->fb.wpr2.frts.size;
	meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
	meta->fbSize = gsp->fb.size;
	meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
	meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
	meta->bootCount = 0;
	meta->partitionRpcAddr = 0;
	meta->partitionRpcRequestOffset = 0;
	meta->partitionRpcReplyOffset = 0;
	meta->verified = 0;
	return 0;
}
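/* Carve the shared-memory block into a PTE array plus the command (CPU->GSP)
 * and status/message (GSP->CPU) queues.  Each queue keeps its tx/rx headers
 * in the first page; the remaining pages hold the actual queue elements.
 */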
static int
r535_gsp_shared_init(struct nvkm_gsp *gsp)
{
	struct {
		msgqTxHeader tx;
		msgqRxHeader rx;
	} *cmdq, *msgq;
	int ret, i;

	gsp->shm.cmdq.size = 0x40000;
	gsp->shm.msgq.size = 0x40000;

	gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT;
	gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
	gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);

	ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size +
				     gsp->shm.cmdq.size +
				     gsp->shm.msgq.size,
				&gsp->shm.mem);
	if (ret)
		return ret;

	gsp->shm.ptes.ptr = gsp->shm.mem.data;
	gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size;
	gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size;

	for (i = 0; i < gsp->shm.ptes.nr; i++)
		gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT);

	cmdq = gsp->shm.cmdq.ptr;
	cmdq->tx.version = 0;
	cmdq->tx.size = gsp->shm.cmdq.size;
	cmdq->tx.entryOff = GSP_PAGE_SIZE;
	cmdq->tx.msgSize = GSP_PAGE_SIZE;
	cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize;
	cmdq->tx.writePtr = 0;
	cmdq->tx.flags = 1;
	cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr);

	msgq = gsp->shm.msgq.ptr;

	gsp->cmdq.cnt = cmdq->tx.msgCount;
	gsp->cmdq.wptr = &cmdq->tx.writePtr;
	gsp->cmdq.rptr = &msgq->rx.readPtr;
	gsp->msgq.cnt = cmdq->tx.msgCount;
	gsp->msgq.wptr = &msgq->tx.writePtr;
	gsp->msgq.rptr = &cmdq->rx.readPtr;
	return 0;
}

static int
r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
{
	GSP_ARGUMENTS_CACHED *args;
	int ret;

	if (!resume) {
		ret = r535_gsp_shared_init(gsp);
		if (ret)
			return ret;

		ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
		if (ret)
			return ret;
	}

	args = gsp->rmargs.data;
	args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
	args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
	args->messageQueueInitArguments.cmdQueueOffset =
		(u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
	args->messageQueueInitArguments.statQueueOffset =
		(u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;

	if (!resume) {
		args->srInitArguments.oldLevel = 0;
		args->srInitArguments.flags = 0;
		args->srInitArguments.bInPMTransition = 0;
	} else {
		args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
		args->srInitArguments.flags = 0;
		args->srInitArguments.bInPMTransition = 1;
	}

	return 0;
}
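/* Fold up to eight ASCII characters of a region name into a u64 id, last
 * character in the least-significant byte: "LOGINIT" packs to
 * 0x4c4f47494e4954.
 */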
static inline u64
r535_gsp_libos_id8(const char *name)
{
	u64 id = 0;

	for (int i = 0; i < sizeof(id) && *name; i++, name++)
		id = (id << 8) | *name;

	return id;
}

/**
 * create_pte_array() - creates a PTE array of a physically contiguous buffer
 * @ptes: pointer to the array
 * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned)
 * @size: size of the buffer
 *
 * GSP-RM sometimes expects physically-contiguous buffers to have an array of
 * "PTEs" for each page in that buffer.  Although in theory that allows for
 * the buffer to be physically discontiguous, GSP-RM does not currently
 * support that.
 *
 * In this case, the PTEs are DMA addresses of each page of the buffer.  Since
 * the buffer is physically contiguous, calculating all the PTEs is simple
 * math.
 *
 * See memdescGetPhysAddrsForGpu()
 */
static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
{
	unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
	unsigned int i;

	for (i = 0; i < num_pages; i++)
		ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
}

/**
 * r535_gsp_libos_init() -- create the libos arguments structure
 * @gsp: gsp pointer
 *
 * The logging buffers are byte queues that contain encoded printf-like
 * messages from GSP-RM.  They need to be decoded by a special application
 * that can parse the buffers.
 *
 * The 'loginit' buffer contains logs from early GSP-RM init and
 * exception dumps.  The 'logrm' buffer contains the subsequent logs.  Both are
 * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
 *
 * The physical address map for the log buffer is stored in the buffer
 * itself, starting with offset 1.  Offset 0 contains the "put" pointer.
 *
 * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
 * configured for a larger page size (e.g. 64K pages), we need to give
 * the GSP an array of 4K pages.  Fortunately, since the buffer is
 * physically contiguous, it's simple math to calculate the addresses.
 *
 * The buffers must be a multiple of GSP_PAGE_SIZE.  GSP-RM also currently
 * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
 * buffers to be physically contiguous anyway.
 *
 * The memory allocated for the arguments must remain until the GSP sends the
 * init_done RPC.
 *
 * See _kgspInitLibosLoggingStructures (allocates memory for buffers)
 * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
 */
static int
r535_gsp_libos_init(struct nvkm_gsp *gsp)
{
	LibosMemoryRegionInitArgument *args;
	int ret;

	ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos);
	if (ret)
		return ret;

	args = gsp->libos.data;

	ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit);
	if (ret)
		return ret;

	args[0].id8 = r535_gsp_libos_id8("LOGINIT");
	args[0].pa = gsp->loginit.addr;
	args[0].size = gsp->loginit.size;
	args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
	args[0].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
	create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size);

	ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr);
	if (ret)
		return ret;

	args[1].id8 = r535_gsp_libos_id8("LOGINTR");
	args[1].pa = gsp->logintr.addr;
	args[1].size = gsp->logintr.size;
	args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
	args[1].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
	create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size);

	ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm);
	if (ret)
		return ret;

	args[2].id8 = r535_gsp_libos_id8("LOGRM");
	args[2].pa = gsp->logrm.addr;
	args[2].size = gsp->logrm.size;
	args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
	args[2].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
	create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size);

	ret = r535_gsp_rmargs_init(gsp, false);
	if (ret)
		return ret;

	args[3].id8 = r535_gsp_libos_id8("RMARGS");
	args[3].pa = gsp->rmargs.addr;
	args[3].size = gsp->rmargs.size;
	args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
	args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
	return 0;
}
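/* Helpers that back large GSP-RM buffers (e.g. the suspend/resume backup
 * captured in r535_gsp_fini()) with a scatterlist of individually allocated
 * pages rather than one large contiguous DMA allocation.
 */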
void
nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt)
{
	struct scatterlist *sgl;
	int i;

	dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);

	for_each_sgtable_sg(sgt, sgl, i) {
		struct page *page = sg_page(sgl);

		__free_page(page);
	}

	sg_free_table(sgt);
}

int
nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
{
	const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE);
	struct scatterlist *sgl;
	int ret, i;

	ret = sg_alloc_table(sgt, pages, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sgtable_sg(sgt, sgl, i) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page) {
			nvkm_gsp_sg_free(device, sgt);
			return -ENOMEM;
		}

		sg_set_page(sgl, page, PAGE_SIZE, 0);
	}

	ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		nvkm_gsp_sg_free(device, sgt);

	return ret;
}

static void
nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
{
	nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2);
	nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
	nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
}
 *
 * Returns 0 on success, or a negative error code.
 *
 * See kgspCreateRadix3_IMPL
 */
static int
nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
		   struct nvkm_gsp_radix3 *rx3)
{
	struct sg_dma_page_iter sg_dma_iter;
	struct scatterlist *sg;
	size_t bufsize;
	u64 *pte;
	int ret, i, page_idx = 0;

	ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl0);
	if (ret)
		return ret;

	ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl1);
	if (ret)
		goto lvl1_fail;

	// Allocate level 2
	bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
	ret = nvkm_gsp_sg(gsp->subdev.device, bufsize, &rx3->lvl2);
	if (ret)
		goto lvl2_fail;

	// Write the bus address of level 1 to level 0
	pte = rx3->lvl0.data;
	*pte = rx3->lvl1.addr;

	// Write the bus address of each page in level 2 to level 1
	pte = rx3->lvl1.data;
	for_each_sgtable_dma_page(&rx3->lvl2, &sg_dma_iter, 0)
		*pte++ = sg_page_iter_dma_address(&sg_dma_iter);

	// Finally, write the bus address of each page in sgt to level 2
	for_each_sgtable_sg(&rx3->lvl2, sg, i) {
		void *sgl_end;

		pte = sg_virt(sg);
		sgl_end = (void *)pte + sg->length;

		for_each_sgtable_dma_page(sgt, &sg_dma_iter, page_idx) {
			*pte++ = sg_page_iter_dma_address(&sg_dma_iter);
			page_idx++;

			// Go to the next scatterlist for level 2 if we've reached the end
			if ((void *)pte >= sgl_end)
				break;
		}
	}

	if (ret) {
lvl2_fail:
		nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
lvl1_fail:
		nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
	}

	return ret;
}

int
r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
{
	u32 mbox0 = 0xff, mbox1 = 0xff;
	int ret;

	if (!gsp->running)
		return 0;

	if (suspend) {
		GspFwWprMeta *meta = gsp->wpr_meta.data;
		u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
		GspFwSRMeta *sr;

		ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
		if (ret)
			return ret;

		ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3);
		if (ret)
			return ret;

		ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta);
		if (ret)
			return ret;

		sr = gsp->sr.meta.data;
		sr->magic = GSP_FW_SR_META_MAGIC;
		sr->revision = GSP_FW_SR_META_REVISION;
		sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
		sr->sizeOfSuspendResumeData = len;

		mbox0 = lower_32_bits(gsp->sr.meta.addr);
		mbox1 = upper_32_bits(gsp->sr.meta.addr);
	}

	ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
	if (WARN_ON(ret))
		return ret;

	nvkm_msec(gsp->subdev.device, 2000,
		if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000)
			break;
	);

	nvkm_falcon_reset(&gsp->falcon);

	ret = nvkm_gsp_fwsec_sb(gsp);
	WARN_ON(ret);

	ret = r535_gsp_booter_unload(gsp, mbox0, mbox1);
	WARN_ON(ret);

	gsp->running = false;
	return 0;
}
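/*
 * Editor's note (descriptive comment, derived from the code below):
 * r535_gsp_init() starts GSP-RM.  The booter firmware is executed with the
 * address of either the WPR metadata (cold boot) or the suspend/resume
 * metadata prepared by r535_gsp_fini() (resume) in its mailboxes, and we then
 * wait for GSP-RM to signal GSP_INIT_DONE.  On resume, the backup buffers
 * allocated for suspend are freed again afterwards.
 */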
int
r535_gsp_init(struct nvkm_gsp *gsp)
{
	u32 mbox0, mbox1;
	int ret;

	if (!gsp->sr.meta.data) {
		mbox0 = lower_32_bits(gsp->wpr_meta.addr);
		mbox1 = upper_32_bits(gsp->wpr_meta.addr);
	} else {
		r535_gsp_rmargs_init(gsp, true);

		mbox0 = lower_32_bits(gsp->sr.meta.addr);
		mbox1 = upper_32_bits(gsp->sr.meta.addr);
	}

	/* Execute booter to handle (eventually...) booting GSP-RM. */
	ret = r535_gsp_booter_load(gsp, mbox0, mbox1);
	if (WARN_ON(ret))
		goto done;

	ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
	if (ret)
		goto done;

	gsp->running = true;

done:
	if (gsp->sr.meta.data) {
		nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta);
		nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
		nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
		return ret;
	}

	if (ret == 0)
		ret = r535_gsp_postinit(gsp);

	return ret;
}

static int
r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
{
	const struct firmware *fw = gsp->fws.bl;
	const struct nvfw_bin_hdr *hdr;
	RM_RISCV_UCODE_DESC *desc;
	int ret;

	hdr = nvfw_bin_hdr(&gsp->subdev, fw->data);
	desc = (void *)fw->data + hdr->header_offset;

	ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw);
	if (ret)
		return ret;

	memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size);

	gsp->boot.code_offset = desc->monitorCodeOffset;
	gsp->boot.data_offset = desc->monitorDataOffset;
	gsp->boot.manifest_offset = desc->manifestOffset;
	gsp->boot.app_version = desc->appVersion;
	return 0;
}

static const struct nvkm_firmware_func
r535_gsp_fw = {
	.type = NVKM_FIRMWARE_IMG_SGT,
};

static int
r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize)
{
	const u8 *img = gsp->fws.rm->data;
	const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img;
	const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff];
	const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset];

	for (int i = 0; i < ehdr->e_shnum; i++, shdr++) {
		if (!strcmp(&names[shdr->sh_name], name)) {
			*pdata = &img[shdr->sh_offset];
			*psize = shdr->sh_size;
			return 0;
		}
	}

	nvkm_error(&gsp->subdev, "section '%s' not found\n", name);
	return -ENOENT;
}

static void
r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
{
	nvkm_firmware_put(gsp->fws.bl);
	gsp->fws.bl = NULL;
	nvkm_firmware_put(gsp->fws.booter.unload);
	gsp->fws.booter.unload = NULL;
	nvkm_firmware_put(gsp->fws.booter.load);
	gsp->fws.booter.load = NULL;
	nvkm_firmware_put(gsp->fws.rm);
	gsp->fws.rm = NULL;
}

void
r535_gsp_dtor(struct nvkm_gsp *gsp)
{
	idr_destroy(&gsp->client_id.idr);
	mutex_destroy(&gsp->client_id.mutex);

	nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
	nvkm_gsp_mem_dtor(gsp, &gsp->sig);
	nvkm_firmware_dtor(&gsp->fw);

	nvkm_falcon_fw_dtor(&gsp->booter.unload);
	nvkm_falcon_fw_dtor(&gsp->booter.load);

	mutex_destroy(&gsp->msgq.mutex);
	mutex_destroy(&gsp->cmdq.mutex);

	r535_gsp_dtor_fws(gsp);

	nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
	nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
	nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
	nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
	nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
	nvkm_gsp_mem_dtor(gsp, &gsp->logrm);
}
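/*
 * Editor's note (descriptive comment, derived from the code below):
 * r535_gsp_oneinit() performs the one-time setup that r535_gsp_init() relies
 * on: construct the booter load/unload falcon firmware, copy the GSP-RM ELF
 * image and its signature into DMA-able memory, build the radix3 page table
 * for the image, register the RPC event handlers, compute the WPR2 layout,
 * run FWSEC-FRTS, create the libos/WPR metadata, prepare the SET_SYSTEM_INFO
 * and SET_REGISTRY RPCs, and finally reset the GSP falcon into RISC-V mode.
 */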
int
r535_gsp_oneinit(struct nvkm_gsp *gsp)
{
	struct nvkm_device *device = gsp->subdev.device;
	const u8 *data;
	u64 size;
	int ret;

	mutex_init(&gsp->cmdq.mutex);
	mutex_init(&gsp->msgq.mutex);

	ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
				     &device->sec2->falcon, &gsp->booter.load);
	if (ret)
		return ret;

	ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
				     &device->sec2->falcon, &gsp->booter.unload);
	if (ret)
		return ret;

	/* Load GSP firmware from ELF image into DMA-accessible memory. */
	ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
	if (ret)
		return ret;

	ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw);
	if (ret)
		return ret;

	/* Load relevant signature from ELF image. */
	ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size);
	if (ret)
		return ret;

	ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig);
	if (ret)
		return ret;

	memcpy(gsp->sig.data, data, size);

	/* Build radix3 page table for ELF image. */
	ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
	if (ret)
		return ret;

	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
			      r535_gsp_msg_run_cpu_sequencer, gsp);
	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
			      r535_gsp_msg_rc_triggered, gsp);
	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
			      r535_gsp_msg_mmu_fault_queued, gsp);
	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);

	ret = r535_gsp_rm_boot_ctor(gsp);
	if (ret)
		return ret;

	/* Release FW images - we've copied them to DMA buffers now. */
	r535_gsp_dtor_fws(gsp);

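	/*
	 * Editor's note (descriptive comment, derived from the code below):
	 * the WPR2 region is carved out working downwards from the (aligned)
	 * VBIOS address in the framebuffer: first the 1MiB FRTS region, then
	 * the bootloader image, the GSP-RM ELF image, the GSP-RM heap, and
	 * finally the page holding GspFwWprMeta, which marks the start of
	 * WPR2.  A further 1MiB non-WPR heap sits directly below WPR2.
	 */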
	/* Calculate FB layout. */
	gsp->fb.wpr2.frts.size = 0x100000;
	gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;

	gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
	gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);

	gsp->fb.wpr2.elf.size = gsp->fw.len;
	gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);

	{
		u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);

		gsp->fb.wpr2.heap.size =
			gsp->func->wpr_heap.os_carveout_size +
			gsp->func->wpr_heap.base_size +
			ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
			ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);

		gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
	}

	gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
	gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);

	gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
	gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;

	gsp->fb.heap.size = 0x100000;
	gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;

	ret = nvkm_gsp_fwsec_frts(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_libos_init(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_wpr_meta_init(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_rpc_set_system_info(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_rpc_set_registry(gsp);
	if (WARN_ON(ret))
		return ret;

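	/*
	 * Editor's note (descriptive comment; the register purpose is an
	 * assumption): after the reset below, the DMA address of the libos
	 * argument array built by r535_gsp_libos_init() is written to the
	 * falcon mailbox registers (0x040/0x044), where GSP-RM is expected to
	 * read its boot arguments once the booter starts it.
	 */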
	/* Reset GSP into RISC-V mode. */
	ret = gsp->func->reset(gsp);
	if (WARN_ON(ret))
		return ret;

	nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
	nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));

	mutex_init(&gsp->client_id.mutex);
	idr_init(&gsp->client_id.idr);
	return 0;
}

static int
r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
		 const struct firmware **pfw)
{
	char fwname[64];

	snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
	return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
}

int
r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	int ret;
	bool enable_gsp = fwif->enable;

#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
	enable_gsp = true;
#endif
	if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
		return -EINVAL;

	if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
	    (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
	    (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
	    (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
		r535_gsp_dtor_fws(gsp);
		return ret;
	}

	return 0;
}

#define NVKM_GSP_FIRMWARE(chip)                                                  \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin");        \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin");      \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin");         \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")

NVKM_GSP_FIRMWARE(tu102);
NVKM_GSP_FIRMWARE(tu104);
NVKM_GSP_FIRMWARE(tu106);

NVKM_GSP_FIRMWARE(tu116);
NVKM_GSP_FIRMWARE(tu117);

NVKM_GSP_FIRMWARE(ga100);

NVKM_GSP_FIRMWARE(ga102);
NVKM_GSP_FIRMWARE(ga103);
NVKM_GSP_FIRMWARE(ga104);
NVKM_GSP_FIRMWARE(ga106);
NVKM_GSP_FIRMWARE(ga107);

NVKM_GSP_FIRMWARE(ad102);
NVKM_GSP_FIRMWARE(ad103);
NVKM_GSP_FIRMWARE(ad104);
NVKM_GSP_FIRMWARE(ad106);
NVKM_GSP_FIRMWARE(ad107);