/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/pci.h>
#include <subdev/timer.h>
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
#include <engine/sec2.h>

#include <nvfw/fw.h>

#include <nvrm/nvtypes.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>

#include <linux/acpi.h>

#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16) /* 16 pages */

struct r535_gsp_msg {
	u8 auth_tag_buffer[16];
	u8 aad_buffer[16];
	u32 checksum;
	u32 sequence;
	u32 elem_count;
	u32 pad;
	u8 data[];
};

#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)

static void *
r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
{
	struct r535_gsp_msg *mqe;
	u32 size, rptr = *gsp->msgq.rptr;
	int used;
	u8 *msg;
	u32 len;

	size =
DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE); 83 if (WARN_ON(!size || size >= gsp->msgq.cnt)) 84 return ERR_PTR(-EINVAL); 85 86 do { 87 u32 wptr = *gsp->msgq.wptr; 88 89 used = wptr + gsp->msgq.cnt - rptr; 90 if (used >= gsp->msgq.cnt) 91 used -= gsp->msgq.cnt; 92 if (used >= size) 93 break; 94 95 usleep_range(1, 2); 96 } while (--(*ptime)); 97 98 if (WARN_ON(!*ptime)) 99 return ERR_PTR(-ETIMEDOUT); 100 101 mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000); 102 103 if (prepc) { 104 *prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe); 105 return mqe->data; 106 } 107 108 msg = kvmalloc(repc, GFP_KERNEL); 109 if (!msg) 110 return ERR_PTR(-ENOMEM); 111 112 len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe); 113 len = min_t(u32, repc, len); 114 memcpy(msg, mqe->data, len); 115 116 rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE); 117 if (rptr == gsp->msgq.cnt) 118 rptr = 0; 119 120 repc -= len; 121 122 if (repc) { 123 mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000); 124 memcpy(msg + len, mqe, repc); 125 126 rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE); 127 } 128 129 mb(); 130 (*gsp->msgq.rptr) = rptr; 131 return msg; 132 } 133 134 static void * 135 r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime) 136 { 137 return r535_gsp_msgq_wait(gsp, repc, NULL, ptime); 138 } 139 140 static int 141 r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv) 142 { 143 struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data); 144 struct r535_gsp_msg *cqe; 145 u32 argc = cmd->checksum; 146 u64 *ptr = (void *)cmd; 147 u64 *end; 148 u64 csum = 0; 149 int free, time = 1000000; 150 u32 wptr, size; 151 u32 off = 0; 152 153 argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE); 154 155 end = (u64 *)((char *)ptr + argc); 156 cmd->pad = 0; 157 cmd->checksum = 0; 158 cmd->sequence = gsp->cmdq.seq++; 159 cmd->elem_count = DIV_ROUND_UP(argc, 0x1000); 160 161 while (ptr < end) 162 csum ^= *ptr++; 163 164 cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum); 165 166 wptr = *gsp->cmdq.wptr; 167 do { 168 do { 169 free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1; 170 if (free >= gsp->cmdq.cnt) 171 free -= gsp->cmdq.cnt; 172 if (free >= 1) 173 break; 174 175 usleep_range(1, 2); 176 } while(--time); 177 178 if (WARN_ON(!time)) { 179 kvfree(cmd); 180 return -ETIMEDOUT; 181 } 182 183 cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000); 184 size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE); 185 memcpy(cqe, (u8 *)cmd + off, size); 186 187 wptr += DIV_ROUND_UP(size, 0x1000); 188 if (wptr == gsp->cmdq.cnt) 189 wptr = 0; 190 191 off += size; 192 argc -= size; 193 } while(argc); 194 195 nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr); 196 wmb(); 197 (*gsp->cmdq.wptr) = wptr; 198 mb(); 199 200 nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000); 201 202 kvfree(cmd); 203 return 0; 204 } 205 206 static void * 207 r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc) 208 { 209 struct r535_gsp_msg *cmd; 210 u32 size = GSP_MSG_HDR_SIZE + argc; 211 212 size = ALIGN(size, GSP_MSG_MIN_SIZE); 213 cmd = kvzalloc(size, GFP_KERNEL); 214 if (!cmd) 215 return ERR_PTR(-ENOMEM); 216 217 cmd->checksum = argc; 218 return cmd->data; 219 } 220 221 struct nvfw_gsp_rpc { 222 u32 header_version; 223 u32 signature; 224 u32 length; 225 u32 function; 226 u32 rpc_result; 227 u32 rpc_result_private; 228 u32 sequence; 229 union { 230 u32 spare; 231 u32 cpuRmGfid; 232 }; 233 u8 data[]; 234 }; 235 236 static void 237 r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg) 238 { 239 
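	/* Messages returned by r535_gsp_msgq_recv() are kvmalloc'd copies of
	 * the shared-memory queue contents, so a plain kvfree() releases them.
	 */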
kvfree(msg); 240 } 241 242 static void 243 r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl) 244 { 245 if (gsp->subdev.debug >= lvl) { 246 nvkm_printk__(&gsp->subdev, lvl, info, 247 "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n", 248 msg->function, msg->length, msg->length - sizeof(*msg), 249 msg->rpc_result, msg->rpc_result_private); 250 print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1, 251 msg->data, msg->length - sizeof(*msg), true); 252 } 253 } 254 255 static struct nvfw_gsp_rpc * 256 r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc) 257 { 258 struct nvkm_subdev *subdev = &gsp->subdev; 259 struct nvfw_gsp_rpc *msg; 260 int time = 4000000, i; 261 u32 size; 262 263 retry: 264 msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time); 265 if (IS_ERR_OR_NULL(msg)) 266 return msg; 267 268 msg = r535_gsp_msgq_recv(gsp, msg->length, &time); 269 if (IS_ERR_OR_NULL(msg)) 270 return msg; 271 272 if (msg->rpc_result) { 273 r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR); 274 r535_gsp_msg_done(gsp, msg); 275 return ERR_PTR(-EINVAL); 276 } 277 278 r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE); 279 280 if (fn && msg->function == fn) { 281 if (repc) { 282 if (msg->length < sizeof(*msg) + repc) { 283 nvkm_error(subdev, "msg len %d < %zd\n", 284 msg->length, sizeof(*msg) + repc); 285 r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR); 286 r535_gsp_msg_done(gsp, msg); 287 return ERR_PTR(-EIO); 288 } 289 290 return msg; 291 } 292 293 r535_gsp_msg_done(gsp, msg); 294 return NULL; 295 } 296 297 for (i = 0; i < gsp->msgq.ntfy_nr; i++) { 298 struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i]; 299 300 if (ntfy->fn == msg->function) { 301 ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg)); 302 break; 303 } 304 } 305 306 if (i == gsp->msgq.ntfy_nr) 307 r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN); 308 309 r535_gsp_msg_done(gsp, msg); 310 if (fn) 311 goto retry; 312 313 if (*gsp->msgq.rptr != *gsp->msgq.wptr) 314 goto retry; 315 316 return NULL; 317 } 318 319 static int 320 r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv) 321 { 322 int ret = 0; 323 324 mutex_lock(&gsp->msgq.mutex); 325 if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) { 326 ret = -ENOSPC; 327 } else { 328 gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn; 329 gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func; 330 gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv; 331 gsp->msgq.ntfy_nr++; 332 } 333 mutex_unlock(&gsp->msgq.mutex); 334 return ret; 335 } 336 337 static int 338 r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn) 339 { 340 void *repv; 341 342 mutex_lock(&gsp->cmdq.mutex); 343 repv = r535_gsp_msg_recv(gsp, fn, 0); 344 mutex_unlock(&gsp->cmdq.mutex); 345 if (IS_ERR(repv)) 346 return PTR_ERR(repv); 347 348 return 0; 349 } 350 351 static void * 352 r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) 353 { 354 struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data); 355 struct nvfw_gsp_rpc *msg; 356 u32 fn = rpc->function; 357 void *repv = NULL; 358 int ret; 359 360 if (gsp->subdev.debug >= NV_DBG_TRACE) { 361 nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function, 362 rpc->length, rpc->length - sizeof(*rpc)); 363 print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1, 364 rpc->data, rpc->length - sizeof(*rpc), true); 365 } 366 367 ret = r535_gsp_cmdq_push(gsp, rpc); 368 if (ret) { 369 mutex_unlock(&gsp->cmdq.mutex); 370 return ERR_PTR(ret); 371 } 372 373 if (wait) { 374 msg = r535_gsp_msg_recv(gsp, fn, 
repc); 375 if (!IS_ERR_OR_NULL(msg)) 376 repv = msg->data; 377 else 378 repv = msg; 379 } 380 381 return repv; 382 } 383 384 static void 385 r535_gsp_event_dtor(struct nvkm_gsp_event *event) 386 { 387 struct nvkm_gsp_device *device = event->device; 388 struct nvkm_gsp_client *client = device->object.client; 389 struct nvkm_gsp *gsp = client->gsp; 390 391 mutex_lock(&gsp->client_id.mutex); 392 if (event->func) { 393 list_del(&event->head); 394 event->func = NULL; 395 } 396 mutex_unlock(&gsp->client_id.mutex); 397 398 nvkm_gsp_rm_free(&event->object); 399 event->device = NULL; 400 } 401 402 static int 403 r535_gsp_device_event_get(struct nvkm_gsp_event *event) 404 { 405 struct nvkm_gsp_device *device = event->device; 406 NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl; 407 408 ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice, 409 NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl)); 410 if (IS_ERR(ctrl)) 411 return PTR_ERR(ctrl); 412 413 ctrl->event = event->id; 414 ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; 415 return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl); 416 } 417 418 static int 419 r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id, 420 nvkm_gsp_event_func func, struct nvkm_gsp_event *event) 421 { 422 struct nvkm_gsp_client *client = device->object.client; 423 struct nvkm_gsp *gsp = client->gsp; 424 NV0005_ALLOC_PARAMETERS *args; 425 int ret; 426 427 args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle, 428 NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args), 429 &event->object); 430 if (IS_ERR(args)) 431 return PTR_ERR(args); 432 433 args->hParentClient = client->object.handle; 434 args->hSrcResource = 0; 435 args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX; 436 args->notifyIndex = NV01_EVENT_CLIENT_RM | id; 437 args->data = NULL; 438 439 ret = nvkm_gsp_rm_alloc_wr(&event->object, args); 440 if (ret) 441 return ret; 442 443 event->device = device; 444 event->id = id; 445 446 ret = r535_gsp_device_event_get(event); 447 if (ret) { 448 nvkm_gsp_event_dtor(event); 449 return ret; 450 } 451 452 mutex_lock(&gsp->client_id.mutex); 453 event->func = func; 454 list_add(&event->head, &client->events); 455 mutex_unlock(&gsp->client_id.mutex); 456 return 0; 457 } 458 459 static void 460 r535_gsp_device_dtor(struct nvkm_gsp_device *device) 461 { 462 nvkm_gsp_rm_free(&device->subdevice); 463 nvkm_gsp_rm_free(&device->object); 464 } 465 466 static int 467 r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device) 468 { 469 NV2080_ALLOC_PARAMETERS *args; 470 471 return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args), 472 &device->subdevice); 473 } 474 475 static int 476 r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device) 477 { 478 NV0080_ALLOC_PARAMETERS *args; 479 int ret; 480 481 args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args), 482 &device->object); 483 if (IS_ERR(args)) 484 return PTR_ERR(args); 485 486 args->hClientShare = client->object.handle; 487 488 ret = nvkm_gsp_rm_alloc_wr(&device->object, args); 489 if (ret) 490 return ret; 491 492 ret = r535_gsp_subdevice_ctor(device); 493 if (ret) 494 nvkm_gsp_rm_free(&device->object); 495 496 return ret; 497 } 498 499 static void 500 r535_gsp_client_dtor(struct nvkm_gsp_client *client) 501 { 502 struct nvkm_gsp *gsp = client->gsp; 503 504 nvkm_gsp_rm_free(&client->object); 505 506 mutex_lock(&gsp->client_id.mutex); 507 idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff); 508 
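	/* Client handles are formed as 0xc1d00000 | id in r535_gsp_client_ctor(),
	 * so masking with 0xffff recovers the idr slot released above.
	 */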
	mutex_unlock(&gsp->client_id.mutex);

	client->gsp = NULL;
}

static int
r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
{
	NV0000_ALLOC_PARAMETERS *args;
	int ret;

	mutex_lock(&gsp->client_id.mutex);
	ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
	mutex_unlock(&gsp->client_id.mutex);
	if (ret < 0)
		return ret;

	client->gsp = gsp;
	client->object.client = client;
	INIT_LIST_HEAD(&client->events);

	args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
				     &client->object);
	if (IS_ERR(args)) {
		r535_gsp_client_dtor(client);
		return PTR_ERR(args);
	}

	args->hClient = client->object.handle;
	args->processID = ~0;

	ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
	if (ret) {
		r535_gsp_client_dtor(client);
		return ret;
	}

	return 0;
}

static int
r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
{
	struct nvkm_gsp_client *client = object->client;
	struct nvkm_gsp *gsp = client->gsp;
	rpc_free_v03_00 *rpc;

	nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
		   client->object.handle, object->handle);

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
	if (WARN_ON(IS_ERR_OR_NULL(rpc)))
		return -EIO;

	rpc->params.hRoot = client->object.handle;
	rpc->params.hObjectParent = 0;
	rpc->params.hObjectOld = object->handle;
	return nvkm_gsp_rpc_wr(gsp, rpc, true);
}

static void
r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
{
	rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params);

	nvkm_gsp_rpc_done(object->client->gsp, rpc);
}

static void *
r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
	rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
	struct nvkm_gsp *gsp = object->client->gsp;
	void *ret;

	rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	if (rpc->status) {
		nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
		ret = ERR_PTR(-EINVAL);
	} else {
		ret = repc ?
rpc->params : NULL; 592 } 593 594 if (IS_ERR_OR_NULL(ret)) 595 nvkm_gsp_rpc_done(gsp, rpc); 596 597 return ret; 598 } 599 600 static void * 601 r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc) 602 { 603 struct nvkm_gsp_client *client = object->client; 604 struct nvkm_gsp *gsp = client->gsp; 605 rpc_gsp_rm_alloc_v03_00 *rpc; 606 607 nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n", 608 client->object.handle, object->parent->handle, object->handle, oclass, argc); 609 610 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc); 611 if (IS_ERR(rpc)) 612 return rpc; 613 614 rpc->hClient = client->object.handle; 615 rpc->hParent = object->parent->handle; 616 rpc->hObject = object->handle; 617 rpc->hClass = oclass; 618 rpc->status = 0; 619 rpc->paramsSize = argc; 620 return rpc->params; 621 } 622 623 static void 624 r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv) 625 { 626 rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params); 627 628 nvkm_gsp_rpc_done(object->client->gsp, rpc); 629 } 630 631 static void * 632 r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc) 633 { 634 rpc_gsp_rm_control_v03_00 *rpc = container_of(argv, typeof(*rpc), params); 635 struct nvkm_gsp *gsp = object->client->gsp; 636 void *ret; 637 638 rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc); 639 if (IS_ERR_OR_NULL(rpc)) 640 return rpc; 641 642 if (rpc->status) { 643 nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n", 644 object->client->object.handle, object->handle, rpc->cmd, rpc->status); 645 ret = ERR_PTR(-EINVAL); 646 } else { 647 ret = repc ? rpc->params : NULL; 648 } 649 650 if (IS_ERR_OR_NULL(ret)) 651 nvkm_gsp_rpc_done(gsp, rpc); 652 653 return ret; 654 } 655 656 static void * 657 r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc) 658 { 659 struct nvkm_gsp_client *client = object->client; 660 struct nvkm_gsp *gsp = client->gsp; 661 rpc_gsp_rm_control_v03_00 *rpc; 662 663 nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n", 664 client->object.handle, object->handle, cmd, argc); 665 666 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc); 667 if (IS_ERR(rpc)) 668 return rpc; 669 670 rpc->hClient = client->object.handle; 671 rpc->hObject = object->handle; 672 rpc->cmd = cmd; 673 rpc->status = 0; 674 rpc->paramsSize = argc; 675 return rpc->params; 676 } 677 678 static void 679 r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv) 680 { 681 struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data); 682 683 r535_gsp_msg_done(gsp, rpc); 684 } 685 686 static void * 687 r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc) 688 { 689 struct nvfw_gsp_rpc *rpc; 690 691 rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64))); 692 if (!rpc) 693 return NULL; 694 695 rpc->header_version = 0x03000000; 696 rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V'; 697 rpc->function = fn; 698 rpc->rpc_result = 0xffffffff; 699 rpc->rpc_result_private = 0xffffffff; 700 rpc->length = sizeof(*rpc) + argc; 701 return rpc->data; 702 } 703 704 static void * 705 r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) 706 { 707 struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data); 708 struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), data); 709 const u32 max_msg_size = (16 * 0x1000) - sizeof(struct 
r535_gsp_msg); 710 const u32 max_rpc_size = max_msg_size - sizeof(*rpc); 711 u32 rpc_size = rpc->length - sizeof(*rpc); 712 void *repv; 713 714 mutex_lock(&gsp->cmdq.mutex); 715 if (rpc_size > max_rpc_size) { 716 const u32 fn = rpc->function; 717 718 /* Adjust length, and send initial RPC. */ 719 rpc->length = sizeof(*rpc) + max_rpc_size; 720 cmd->checksum = rpc->length; 721 722 repv = r535_gsp_rpc_send(gsp, argv, false, 0); 723 if (IS_ERR(repv)) 724 goto done; 725 726 argv += max_rpc_size; 727 rpc_size -= max_rpc_size; 728 729 /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */ 730 while (rpc_size) { 731 u32 size = min(rpc_size, max_rpc_size); 732 void *next; 733 734 next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size); 735 if (IS_ERR(next)) { 736 repv = next; 737 goto done; 738 } 739 740 memcpy(next, argv, size); 741 742 repv = r535_gsp_rpc_send(gsp, next, false, 0); 743 if (IS_ERR(repv)) 744 goto done; 745 746 argv += size; 747 rpc_size -= size; 748 } 749 750 /* Wait for reply. */ 751 if (wait) { 752 rpc = r535_gsp_msg_recv(gsp, fn, repc); 753 if (!IS_ERR_OR_NULL(rpc)) 754 repv = rpc->data; 755 else 756 repv = rpc; 757 } else { 758 repv = NULL; 759 } 760 } else { 761 repv = r535_gsp_rpc_send(gsp, argv, wait, repc); 762 } 763 764 done: 765 mutex_unlock(&gsp->cmdq.mutex); 766 return repv; 767 } 768 769 const struct nvkm_gsp_rm 770 r535_gsp_rm = { 771 .rpc_get = r535_gsp_rpc_get, 772 .rpc_push = r535_gsp_rpc_push, 773 .rpc_done = r535_gsp_rpc_done, 774 775 .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get, 776 .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push, 777 .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done, 778 779 .rm_alloc_get = r535_gsp_rpc_rm_alloc_get, 780 .rm_alloc_push = r535_gsp_rpc_rm_alloc_push, 781 .rm_alloc_done = r535_gsp_rpc_rm_alloc_done, 782 783 .rm_free = r535_gsp_rpc_rm_free, 784 785 .client_ctor = r535_gsp_client_ctor, 786 .client_dtor = r535_gsp_client_dtor, 787 788 .device_ctor = r535_gsp_device_ctor, 789 .device_dtor = r535_gsp_device_dtor, 790 791 .event_ctor = r535_gsp_device_event_ctor, 792 .event_dtor = r535_gsp_event_dtor, 793 }; 794 795 static void 796 r535_gsp_msgq_work(struct work_struct *work) 797 { 798 struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work); 799 800 mutex_lock(&gsp->cmdq.mutex); 801 if (*gsp->msgq.rptr != *gsp->msgq.wptr) 802 r535_gsp_msg_recv(gsp, 0, 0); 803 mutex_unlock(&gsp->cmdq.mutex); 804 } 805 806 static irqreturn_t 807 r535_gsp_intr(struct nvkm_inth *inth) 808 { 809 struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth); 810 struct nvkm_subdev *subdev = &gsp->subdev; 811 u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008); 812 u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 + 813 gsp->falcon.func->riscv_irqmask); 814 u32 stat = intr & inte; 815 816 if (!stat) { 817 nvkm_debug(subdev, "inte %08x %08x\n", intr, inte); 818 return IRQ_NONE; 819 } 820 821 if (stat & 0x00000040) { 822 nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040); 823 schedule_work(&gsp->msgq.work); 824 stat &= ~0x00000040; 825 } 826 827 if (stat) { 828 nvkm_error(subdev, "intr %08x\n", stat); 829 nvkm_falcon_wr32(&gsp->falcon, 0x014, stat); 830 nvkm_falcon_wr32(&gsp->falcon, 0x004, stat); 831 } 832 833 nvkm_falcon_intr_retrigger(&gsp->falcon); 834 return IRQ_HANDLED; 835 } 836 837 static int 838 r535_gsp_intr_get_table(struct nvkm_gsp *gsp) 839 { 840 NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl; 841 int ret = 0; 842 843 ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice, 844 
NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl)); 845 if (IS_ERR(ctrl)) 846 return PTR_ERR(ctrl); 847 848 ctrl = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, ctrl, sizeof(*ctrl)); 849 if (WARN_ON(IS_ERR(ctrl))) 850 return PTR_ERR(ctrl); 851 852 for (unsigned i = 0; i < ctrl->tableLen; i++) { 853 enum nvkm_subdev_type type; 854 int inst; 855 856 nvkm_debug(&gsp->subdev, 857 "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i, 858 ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask, 859 ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall); 860 861 switch (ctrl->table[i].engineIdx) { 862 case MC_ENGINE_IDX_GSP: 863 type = NVKM_SUBDEV_GSP; 864 inst = 0; 865 break; 866 case MC_ENGINE_IDX_DISP: 867 type = NVKM_ENGINE_DISP; 868 inst = 0; 869 break; 870 case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9: 871 type = NVKM_ENGINE_CE; 872 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0; 873 break; 874 case MC_ENGINE_IDX_GR0: 875 type = NVKM_ENGINE_GR; 876 inst = 0; 877 break; 878 case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7: 879 type = NVKM_ENGINE_NVDEC; 880 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0; 881 break; 882 case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2: 883 type = NVKM_ENGINE_NVENC; 884 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC; 885 break; 886 case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7: 887 type = NVKM_ENGINE_NVJPG; 888 inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0; 889 break; 890 case MC_ENGINE_IDX_OFA0: 891 type = NVKM_ENGINE_OFA; 892 inst = 0; 893 break; 894 default: 895 continue; 896 } 897 898 if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) { 899 ret = -ENOSPC; 900 break; 901 } 902 903 gsp->intr[gsp->intr_nr].type = type; 904 gsp->intr[gsp->intr_nr].inst = inst; 905 gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall; 906 gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall; 907 gsp->intr_nr++; 908 } 909 910 nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl); 911 return ret; 912 } 913 914 static int 915 r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp) 916 { 917 GspStaticConfigInfo *rpc; 918 int last_usable = -1; 919 920 rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc)); 921 if (IS_ERR(rpc)) 922 return PTR_ERR(rpc); 923 924 gsp->internal.client.object.client = &gsp->internal.client; 925 gsp->internal.client.object.parent = NULL; 926 gsp->internal.client.object.handle = rpc->hInternalClient; 927 gsp->internal.client.gsp = gsp; 928 929 gsp->internal.device.object.client = &gsp->internal.client; 930 gsp->internal.device.object.parent = &gsp->internal.client.object; 931 gsp->internal.device.object.handle = rpc->hInternalDevice; 932 933 gsp->internal.device.subdevice.client = &gsp->internal.client; 934 gsp->internal.device.subdevice.parent = &gsp->internal.device.object; 935 gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice; 936 937 gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase; 938 gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase; 939 940 for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) { 941 NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg = 942 &rpc->fbRegionInfoParams.fbRegion[i]; 943 944 nvkm_debug(&gsp->subdev, "fb region %d: " 945 "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i, 946 reg->base, reg->limit, reg->reserved, reg->performance, 947 reg->supportCompressed, reg->supportISO, reg->bProtected); 948 949 if (!reg->reserved && !reg->bProtected) { 950 if (reg->supportCompressed && 
reg->supportISO && 951 !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) { 952 const u64 size = (reg->limit + 1) - reg->base; 953 954 gsp->fb.region[gsp->fb.region_nr].addr = reg->base; 955 gsp->fb.region[gsp->fb.region_nr].size = size; 956 gsp->fb.region_nr++; 957 } 958 959 last_usable = i; 960 } 961 } 962 963 if (last_usable >= 0) { 964 u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1; 965 966 gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base; 967 } 968 969 for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) { 970 if (rpc->gpcInfo.gpcMask & BIT(gpc)) { 971 gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask); 972 gsp->gr.gpcs++; 973 } 974 } 975 976 nvkm_gsp_rpc_done(gsp, rpc); 977 return 0; 978 } 979 980 static int 981 r535_gsp_postinit(struct nvkm_gsp *gsp) 982 { 983 struct nvkm_device *device = gsp->subdev.device; 984 int ret; 985 986 ret = r535_gsp_rpc_get_gsp_static_info(gsp); 987 if (WARN_ON(ret)) 988 return ret; 989 990 INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work); 991 992 ret = r535_gsp_intr_get_table(gsp); 993 if (WARN_ON(ret)) 994 return ret; 995 996 ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst); 997 if (WARN_ON(ret < 0)) 998 return ret; 999 1000 ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev, 1001 r535_gsp_intr, &gsp->subdev.inth); 1002 if (WARN_ON(ret)) 1003 return ret; 1004 1005 nvkm_inth_allow(&gsp->subdev.inth); 1006 nvkm_wr32(device, 0x110004, 0x00000040); 1007 return ret; 1008 } 1009 1010 static int 1011 r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend) 1012 { 1013 rpc_unloading_guest_driver_v1F_07 *rpc; 1014 1015 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc)); 1016 if (IS_ERR(rpc)) 1017 return PTR_ERR(rpc); 1018 1019 if (suspend) { 1020 rpc->bInPMTransition = 1; 1021 rpc->bGc6Entering = 0; 1022 rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; 1023 } else { 1024 rpc->bInPMTransition = 0; 1025 rpc->bGc6Entering = 0; 1026 rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0; 1027 } 1028 1029 return nvkm_gsp_rpc_wr(gsp, rpc, true); 1030 } 1031 1032 /* dword only */ 1033 struct nv_gsp_registry_entries { 1034 const char *name; 1035 u32 value; 1036 }; 1037 1038 static const struct nv_gsp_registry_entries r535_registry_entries[] = { 1039 { "RMSecBusResetEnable", 1 }, 1040 { "RMForcePcieConfigSave", 1 }, 1041 }; 1042 #define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries) 1043 1044 static int 1045 r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) 1046 { 1047 PACKED_REGISTRY_TABLE *rpc; 1048 char *strings; 1049 int str_offset; 1050 int i; 1051 size_t rpc_size = sizeof(*rpc) + sizeof(rpc->entries[0]) * NV_GSP_REG_NUM_ENTRIES; 1052 1053 /* add strings + null terminator */ 1054 for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) 1055 rpc_size += strlen(r535_registry_entries[i].name) + 1; 1056 1057 rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, rpc_size); 1058 if (IS_ERR(rpc)) 1059 return PTR_ERR(rpc); 1060 1061 rpc->size = sizeof(*rpc); 1062 rpc->numEntries = NV_GSP_REG_NUM_ENTRIES; 1063 1064 str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]); 1065 strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES]; 1066 for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) { 1067 int name_len = strlen(r535_registry_entries[i].name) + 1; 1068 1069 rpc->entries[i].nameOffset = str_offset; 1070 rpc->entries[i].type = 1; 1071 rpc->entries[i].data = r535_registry_entries[i].value; 1072 
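		/* Every entry in r535_registry_entries is a DWORD (type 1), so
		 * the payload length is always 4 bytes.
		 */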
rpc->entries[i].length = 4; 1073 memcpy(strings, r535_registry_entries[i].name, name_len); 1074 strings += name_len; 1075 str_offset += name_len; 1076 } 1077 1078 return nvkm_gsp_rpc_wr(gsp, rpc, false); 1079 } 1080 1081 #if defined(CONFIG_ACPI) && defined(CONFIG_X86) 1082 static void 1083 r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) 1084 { 1085 const guid_t NVOP_DSM_GUID = 1086 GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B, 1087 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0); 1088 u64 NVOP_DSM_REV = 0x00000100; 1089 union acpi_object argv4 = { 1090 .buffer.type = ACPI_TYPE_BUFFER, 1091 .buffer.length = 4, 1092 .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), 1093 }, *obj; 1094 1095 caps->status = 0xffff; 1096 1097 if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) 1098 return; 1099 1100 obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); 1101 if (!obj) 1102 return; 1103 1104 printk(KERN_ERR "nvop: obj type %d\n", obj->type); 1105 printk(KERN_ERR "nvop: obj len %d\n", obj->buffer.length); 1106 1107 if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || 1108 WARN_ON(obj->buffer.length != 4)) 1109 return; 1110 1111 caps->status = 0; 1112 caps->optimusCaps = *(u32 *)obj->buffer.pointer; 1113 printk(KERN_ERR "nvop: caps %08x\n", caps->optimusCaps); 1114 1115 ACPI_FREE(obj); 1116 1117 kfree(argv4.buffer.pointer); 1118 } 1119 1120 static void 1121 r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) 1122 { 1123 const guid_t JT_DSM_GUID = 1124 GUID_INIT(0xCBECA351L, 0x067B, 0x4924, 1125 0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34); 1126 u64 JT_DSM_REV = 0x00000103; 1127 u32 caps; 1128 union acpi_object argv4 = { 1129 .buffer.type = ACPI_TYPE_BUFFER, 1130 .buffer.length = sizeof(caps), 1131 .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), 1132 }, *obj; 1133 1134 jt->status = 0xffff; 1135 1136 obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); 1137 if (!obj) 1138 return; 1139 1140 printk(KERN_ERR "jt: obj type %d\n", obj->type); 1141 printk(KERN_ERR "jt: obj len %d\n", obj->buffer.length); 1142 1143 if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || 1144 WARN_ON(obj->buffer.length != 4)) 1145 return; 1146 1147 jt->status = 0; 1148 jt->jtCaps = *(u32 *)obj->buffer.pointer; 1149 jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; 1150 jt->bSBIOSCaps = 0; 1151 printk(KERN_ERR "jt: caps %08x rev:%04x\n", jt->jtCaps, jt->jtRevId); 1152 1153 ACPI_FREE(obj); 1154 1155 kfree(argv4.buffer.pointer); 1156 } 1157 1158 static void 1159 r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode, 1160 MUX_METHOD_DATA_ELEMENT *part) 1161 { 1162 acpi_handle iter = NULL, handle_mux; 1163 acpi_status status; 1164 unsigned long long value; 1165 1166 mode->status = 0xffff; 1167 part->status = 0xffff; 1168 1169 do { 1170 status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter); 1171 if (ACPI_FAILURE(status) || !iter) 1172 return; 1173 1174 status = acpi_evaluate_integer(iter, "_ADR", NULL, &value); 1175 if (ACPI_FAILURE(status) || value != id) 1176 continue; 1177 1178 handle_mux = iter; 1179 } while (!handle_mux); 1180 1181 if (!handle_mux) 1182 return; 1183 1184 status = acpi_evaluate_integer(handle_mux, "MXDM", NULL, &value); 1185 if (ACPI_SUCCESS(status)) { 1186 mode->acpiId = id; 1187 mode->mode = value; 1188 mode->status = 0; 1189 } 1190 1191 status = acpi_evaluate_integer(handle_mux, "MXDS", NULL, &value); 1192 if (ACPI_SUCCESS(status)) { 1193 part->acpiId = id; 1194 part->mode = value; 1195 
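		/* Zero status marks this entry as successfully queried,
		 * mirroring the MXDM handling above.
		 */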
part->status = 0; 1196 } 1197 } 1198 1199 static void 1200 r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux) 1201 { 1202 mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]); 1203 1204 for (int i = 0; i < mux->tableLen; i++) { 1205 r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i], 1206 &mux->acpiIdMuxPartTable[i]); 1207 } 1208 } 1209 1210 static void 1211 r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod) 1212 { 1213 acpi_status status; 1214 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 1215 union acpi_object *_DOD; 1216 1217 dod->status = 0xffff; 1218 1219 status = acpi_evaluate_object(handle, "_DOD", NULL, &output); 1220 if (ACPI_FAILURE(status)) 1221 return; 1222 1223 _DOD = output.pointer; 1224 1225 if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) || 1226 WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList))) 1227 return; 1228 1229 for (int i = 0; i < _DOD->package.count; i++) { 1230 if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER)) 1231 return; 1232 1233 dod->acpiIdList[i] = _DOD->package.elements[i].integer.value; 1234 dod->acpiIdListLen += sizeof(dod->acpiIdList[0]); 1235 } 1236 1237 printk(KERN_ERR "_DOD: ok! len:%d\n", dod->acpiIdListLen); 1238 dod->status = 0; 1239 } 1240 #endif 1241 1242 static void 1243 r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi) 1244 { 1245 #if defined(CONFIG_ACPI) && defined(CONFIG_X86) 1246 acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev); 1247 1248 if (!handle) 1249 return; 1250 1251 acpi->bValid = 1; 1252 1253 r535_gsp_acpi_dod(handle, &acpi->dodMethodData); 1254 if (acpi->dodMethodData.status == 0) 1255 r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData); 1256 1257 r535_gsp_acpi_jt(handle, &acpi->jtMethodData); 1258 r535_gsp_acpi_caps(handle, &acpi->capsMethodData); 1259 #endif 1260 } 1261 1262 static int 1263 r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) 1264 { 1265 struct nvkm_device *device = gsp->subdev.device; 1266 struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device); 1267 GspSystemInfo *info; 1268 1269 if (WARN_ON(device->type == NVKM_DEVICE_TEGRA)) 1270 return -ENOSYS; 1271 1272 info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info)); 1273 if (IS_ERR(info)) 1274 return PTR_ERR(info); 1275 1276 info->gpuPhysAddr = device->func->resource_addr(device, 0); 1277 info->gpuPhysFbAddr = device->func->resource_addr(device, 1); 1278 info->gpuPhysInstAddr = device->func->resource_addr(device, 3); 1279 info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev); 1280 info->maxUserVa = TASK_SIZE; 1281 info->pciConfigMirrorBase = 0x088000; 1282 info->pciConfigMirrorSize = 0x001000; 1283 r535_gsp_acpi_info(gsp, &info->acpiMethodData); 1284 1285 return nvkm_gsp_rpc_wr(gsp, info, false); 1286 } 1287 1288 static int 1289 r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc) 1290 { 1291 struct nvkm_gsp *gsp = priv; 1292 struct nvkm_subdev *subdev = &gsp->subdev; 1293 rpc_os_error_log_v17_00 *msg = repv; 1294 1295 if (WARN_ON(repc < sizeof(*msg))) 1296 return -EINVAL; 1297 1298 nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString); 1299 return 0; 1300 } 1301 1302 static int 1303 r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc) 1304 { 1305 rpc_rc_triggered_v17_02 *msg = repv; 1306 struct nvkm_gsp *gsp = priv; 1307 struct nvkm_subdev *subdev = &gsp->subdev; 1308 struct nvkm_chan *chan; 1309 unsigned long flags; 1310 
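	/* RC (robust channel) notifications must carry at least the fixed
	 * rpc_rc_triggered_v17_02 header; anything shorter is rejected.
	 */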
1311 if (WARN_ON(repc < sizeof(*msg))) 1312 return -EINVAL; 1313 1314 nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n", 1315 msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope, 1316 msg->partitionAttributionId); 1317 1318 chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags); 1319 if (!chan) { 1320 nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid); 1321 return 0; 1322 } 1323 1324 nvkm_chan_error(chan, false); 1325 nvkm_chan_put(&chan, flags); 1326 return 0; 1327 } 1328 1329 static int 1330 r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc) 1331 { 1332 struct nvkm_gsp *gsp = priv; 1333 struct nvkm_subdev *subdev = &gsp->subdev; 1334 1335 WARN_ON(repc != 0); 1336 1337 nvkm_error(subdev, "mmu fault queued\n"); 1338 return 0; 1339 } 1340 1341 static int 1342 r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc) 1343 { 1344 struct nvkm_gsp *gsp = priv; 1345 struct nvkm_gsp_client *client; 1346 struct nvkm_subdev *subdev = &gsp->subdev; 1347 rpc_post_event_v17_00 *msg = repv; 1348 1349 if (WARN_ON(repc < sizeof(*msg))) 1350 return -EINVAL; 1351 if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize)) 1352 return -EINVAL; 1353 1354 nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n", 1355 msg->hClient, msg->hEvent, msg->notifyIndex, msg->data, 1356 msg->status, msg->eventDataSize, msg->bNotifyList); 1357 1358 mutex_lock(&gsp->client_id.mutex); 1359 client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff); 1360 if (client) { 1361 struct nvkm_gsp_event *event; 1362 bool handled = false; 1363 1364 list_for_each_entry(event, &client->events, head) { 1365 if (event->object.handle == msg->hEvent) { 1366 event->func(event, msg->eventData, msg->eventDataSize); 1367 handled = true; 1368 } 1369 } 1370 1371 if (!handled) { 1372 nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n", 1373 msg->hClient, msg->hEvent); 1374 } 1375 } else { 1376 nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient); 1377 } 1378 mutex_unlock(&gsp->client_id.mutex); 1379 return 0; 1380 } 1381 1382 static int 1383 r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc) 1384 { 1385 struct nvkm_gsp *gsp = priv; 1386 struct nvkm_subdev *subdev = &gsp->subdev; 1387 struct nvkm_device *device = subdev->device; 1388 rpc_run_cpu_sequencer_v17_00 *seq = repv; 1389 int ptr = 0, ret; 1390 1391 nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex); 1392 1393 while (ptr < seq->cmdIndex) { 1394 GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr]; 1395 1396 ptr += 1; 1397 ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode); 1398 1399 switch (cmd->opCode) { 1400 case GSP_SEQ_BUF_OPCODE_REG_WRITE: { 1401 u32 addr = cmd->payload.regWrite.addr; 1402 u32 data = cmd->payload.regWrite.val; 1403 1404 nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data); 1405 nvkm_wr32(device, addr, data); 1406 } 1407 break; 1408 case GSP_SEQ_BUF_OPCODE_REG_MODIFY: { 1409 u32 addr = cmd->payload.regModify.addr; 1410 u32 mask = cmd->payload.regModify.mask; 1411 u32 data = cmd->payload.regModify.val; 1412 1413 nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data); 1414 nvkm_mask(device, addr, mask, data); 1415 } 1416 break; 1417 case GSP_SEQ_BUF_OPCODE_REG_POLL: { 1418 u32 addr = cmd->payload.regPoll.addr; 1419 u32 mask = cmd->payload.regPoll.mask; 1420 u32 data = cmd->payload.regPoll.val; 1421 u32 usec = cmd->payload.regPoll.timeout ?: 4000000; 1422 //u32 error = 
cmd->payload.regPoll.error; 1423 1424 nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec); 1425 nvkm_rd32(device, addr); 1426 nvkm_usec(device, usec, 1427 if ((nvkm_rd32(device, addr) & mask) == data) 1428 break; 1429 ); 1430 } 1431 break; 1432 case GSP_SEQ_BUF_OPCODE_DELAY_US: { 1433 u32 usec = cmd->payload.delayUs.val; 1434 1435 nvkm_trace(subdev, "seq usec %d\n", usec); 1436 udelay(usec); 1437 } 1438 break; 1439 case GSP_SEQ_BUF_OPCODE_REG_STORE: { 1440 u32 addr = cmd->payload.regStore.addr; 1441 u32 slot = cmd->payload.regStore.index; 1442 1443 seq->regSaveArea[slot] = nvkm_rd32(device, addr); 1444 nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot, 1445 seq->regSaveArea[slot]); 1446 } 1447 break; 1448 case GSP_SEQ_BUF_OPCODE_CORE_RESET: 1449 nvkm_trace(subdev, "seq core reset\n"); 1450 nvkm_falcon_reset(&gsp->falcon); 1451 nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080); 1452 nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000); 1453 break; 1454 case GSP_SEQ_BUF_OPCODE_CORE_START: 1455 nvkm_trace(subdev, "seq core start\n"); 1456 if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040) 1457 nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002); 1458 else 1459 nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002); 1460 break; 1461 case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: 1462 nvkm_trace(subdev, "seq core wait halt\n"); 1463 nvkm_msec(device, 2000, 1464 if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010) 1465 break; 1466 ); 1467 break; 1468 case GSP_SEQ_BUF_OPCODE_CORE_RESUME: { 1469 struct nvkm_sec2 *sec2 = device->sec2; 1470 u32 mbox0; 1471 1472 nvkm_trace(subdev, "seq core resume\n"); 1473 1474 ret = gsp->func->reset(gsp); 1475 if (WARN_ON(ret)) 1476 return ret; 1477 1478 nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr)); 1479 nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr)); 1480 1481 nvkm_falcon_start(&sec2->falcon); 1482 1483 if (nvkm_msec(device, 2000, 1484 if (nvkm_rd32(device, 0x1180f8) & 0x04000000) 1485 break; 1486 ) < 0) 1487 return -ETIMEDOUT; 1488 1489 mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040); 1490 if (WARN_ON(mbox0)) { 1491 nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0); 1492 return -EIO; 1493 } 1494 1495 nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); 1496 1497 if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) 1498 return -EIO; 1499 } 1500 break; 1501 default: 1502 nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode); 1503 return -EINVAL; 1504 } 1505 } 1506 1507 return 0; 1508 } 1509 1510 static void 1511 nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem) 1512 { 1513 if (mem->data) { 1514 dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr); 1515 mem->data = NULL; 1516 } 1517 } 1518 1519 static int 1520 nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem) 1521 { 1522 mem->size = size; 1523 mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); 1524 if (WARN_ON(!mem->data)) 1525 return -ENOMEM; 1526 1527 return 0; 1528 } 1529 1530 1531 static int 1532 r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) 1533 { 1534 struct nvkm_subdev *subdev = &gsp->subdev; 1535 struct nvkm_device *device = subdev->device; 1536 u32 wpr2_hi; 1537 int ret; 1538 1539 wpr2_hi = nvkm_rd32(device, 0x1fa828); 1540 if (!wpr2_hi) { 1541 nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n"); 1542 return 0; 1543 } 1544 1545 ret = 
nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); 1546 if (WARN_ON(ret)) 1547 return ret; 1548 1549 wpr2_hi = nvkm_rd32(device, 0x1fa828); 1550 if (WARN_ON(wpr2_hi)) 1551 return -EIO; 1552 1553 return 0; 1554 } 1555 1556 static int 1557 r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1) 1558 { 1559 int ret; 1560 1561 ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0); 1562 if (ret) 1563 return ret; 1564 1565 nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version); 1566 1567 if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon))) 1568 return -EIO; 1569 1570 return 0; 1571 } 1572 1573 static int 1574 r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp) 1575 { 1576 GspFwWprMeta *meta; 1577 int ret; 1578 1579 ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta); 1580 if (ret) 1581 return ret; 1582 1583 meta = gsp->wpr_meta.data; 1584 1585 meta->magic = GSP_FW_WPR_META_MAGIC; 1586 meta->revision = GSP_FW_WPR_META_REVISION; 1587 1588 meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr; 1589 meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; 1590 1591 meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; 1592 meta->sizeOfBootloader = gsp->boot.fw.size; 1593 meta->bootloaderCodeOffset = gsp->boot.code_offset; 1594 meta->bootloaderDataOffset = gsp->boot.data_offset; 1595 meta->bootloaderManifestOffset = gsp->boot.manifest_offset; 1596 1597 meta->sysmemAddrOfSignature = gsp->sig.addr; 1598 meta->sizeOfSignature = gsp->sig.size; 1599 1600 meta->gspFwRsvdStart = gsp->fb.heap.addr; 1601 meta->nonWprHeapOffset = gsp->fb.heap.addr; 1602 meta->nonWprHeapSize = gsp->fb.heap.size; 1603 meta->gspFwWprStart = gsp->fb.wpr2.addr; 1604 meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr; 1605 meta->gspFwHeapSize = gsp->fb.wpr2.heap.size; 1606 meta->gspFwOffset = gsp->fb.wpr2.elf.addr; 1607 meta->bootBinOffset = gsp->fb.wpr2.boot.addr; 1608 meta->frtsOffset = gsp->fb.wpr2.frts.addr; 1609 meta->frtsSize = gsp->fb.wpr2.frts.size; 1610 meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000); 1611 meta->fbSize = gsp->fb.size; 1612 meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr; 1613 meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size; 1614 meta->bootCount = 0; 1615 meta->partitionRpcAddr = 0; 1616 meta->partitionRpcRequestOffset = 0; 1617 meta->partitionRpcReplyOffset = 0; 1618 meta->verified = 0; 1619 return 0; 1620 } 1621 1622 static int 1623 r535_gsp_shared_init(struct nvkm_gsp *gsp) 1624 { 1625 struct { 1626 msgqTxHeader tx; 1627 msgqRxHeader rx; 1628 } *cmdq, *msgq; 1629 int ret, i; 1630 1631 gsp->shm.cmdq.size = 0x40000; 1632 gsp->shm.msgq.size = 0x40000; 1633 1634 gsp->shm.ptes.nr = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT; 1635 gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); 1636 gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE); 1637 1638 ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size + 1639 gsp->shm.cmdq.size + 1640 gsp->shm.msgq.size, 1641 &gsp->shm.mem); 1642 if (ret) 1643 return ret; 1644 1645 gsp->shm.ptes.ptr = gsp->shm.mem.data; 1646 gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size; 1647 gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size; 1648 1649 for (i = 0; i < gsp->shm.ptes.nr; i++) 1650 gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT); 1651 1652 cmdq = gsp->shm.cmdq.ptr; 1653 cmdq->tx.version = 0; 1654 cmdq->tx.size = gsp->shm.cmdq.size; 1655 cmdq->tx.entryOff = 
GSP_PAGE_SIZE; 1656 cmdq->tx.msgSize = GSP_PAGE_SIZE; 1657 cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize; 1658 cmdq->tx.writePtr = 0; 1659 cmdq->tx.flags = 1; 1660 cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr); 1661 1662 msgq = gsp->shm.msgq.ptr; 1663 1664 gsp->cmdq.cnt = cmdq->tx.msgCount; 1665 gsp->cmdq.wptr = &cmdq->tx.writePtr; 1666 gsp->cmdq.rptr = &msgq->rx.readPtr; 1667 gsp->msgq.cnt = cmdq->tx.msgCount; 1668 gsp->msgq.wptr = &msgq->tx.writePtr; 1669 gsp->msgq.rptr = &cmdq->rx.readPtr; 1670 return 0; 1671 } 1672 1673 static int 1674 r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume) 1675 { 1676 GSP_ARGUMENTS_CACHED *args; 1677 int ret; 1678 1679 if (!resume) { 1680 ret = r535_gsp_shared_init(gsp); 1681 if (ret) 1682 return ret; 1683 1684 ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs); 1685 if (ret) 1686 return ret; 1687 } 1688 1689 args = gsp->rmargs.data; 1690 args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr; 1691 args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr; 1692 args->messageQueueInitArguments.cmdQueueOffset = 1693 (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data; 1694 args->messageQueueInitArguments.statQueueOffset = 1695 (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data; 1696 1697 if (!resume) { 1698 args->srInitArguments.oldLevel = 0; 1699 args->srInitArguments.flags = 0; 1700 args->srInitArguments.bInPMTransition = 0; 1701 } else { 1702 args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3; 1703 args->srInitArguments.flags = 0; 1704 args->srInitArguments.bInPMTransition = 1; 1705 } 1706 1707 return 0; 1708 } 1709 1710 static inline u64 1711 r535_gsp_libos_id8(const char *name) 1712 { 1713 u64 id = 0; 1714 1715 for (int i = 0; i < sizeof(id) && *name; i++, name++) 1716 id = (id << 8) | *name; 1717 1718 return id; 1719 } 1720 1721 static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size) 1722 { 1723 unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE); 1724 unsigned int i; 1725 1726 for (i = 0; i < num_pages; i++) 1727 ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT); 1728 } 1729 1730 static int 1731 r535_gsp_libos_init(struct nvkm_gsp *gsp) 1732 { 1733 LibosMemoryRegionInitArgument *args; 1734 int ret; 1735 1736 ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos); 1737 if (ret) 1738 return ret; 1739 1740 args = gsp->libos.data; 1741 1742 ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit); 1743 if (ret) 1744 return ret; 1745 1746 args[0].id8 = r535_gsp_libos_id8("LOGINIT"); 1747 args[0].pa = gsp->loginit.addr; 1748 args[0].size = gsp->loginit.size; 1749 args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; 1750 args[0].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; 1751 create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size); 1752 1753 ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr); 1754 if (ret) 1755 return ret; 1756 1757 args[1].id8 = r535_gsp_libos_id8("LOGINTR"); 1758 args[1].pa = gsp->logintr.addr; 1759 args[1].size = gsp->logintr.size; 1760 args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; 1761 args[1].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; 1762 create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size); 1763 1764 ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm); 1765 if (ret) 1766 return ret; 1767 1768 args[2].id8 = r535_gsp_libos_id8("LOGRM"); 1769 args[2].pa = gsp->logrm.addr; 1770 args[2].size = gsp->logrm.size; 1771 args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; 1772 args[2].loc = 
LIBOS_MEMORY_REGION_LOC_SYSMEM; 1773 create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size); 1774 1775 ret = r535_gsp_rmargs_init(gsp, false); 1776 if (ret) 1777 return ret; 1778 1779 args[3].id8 = r535_gsp_libos_id8("RMARGS"); 1780 args[3].pa = gsp->rmargs.addr; 1781 args[3].size = gsp->rmargs.size; 1782 args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; 1783 args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; 1784 return 0; 1785 } 1786 1787 void 1788 nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt) 1789 { 1790 struct scatterlist *sgl; 1791 int i; 1792 1793 dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); 1794 1795 for_each_sgtable_sg(sgt, sgl, i) { 1796 struct page *page = sg_page(sgl); 1797 1798 __free_page(page); 1799 } 1800 1801 sg_free_table(sgt); 1802 } 1803 1804 int 1805 nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt) 1806 { 1807 const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE); 1808 struct scatterlist *sgl; 1809 int ret, i; 1810 1811 ret = sg_alloc_table(sgt, pages, GFP_KERNEL); 1812 if (ret) 1813 return ret; 1814 1815 for_each_sgtable_sg(sgt, sgl, i) { 1816 struct page *page = alloc_page(GFP_KERNEL); 1817 1818 if (!page) { 1819 nvkm_gsp_sg_free(device, sgt); 1820 return -ENOMEM; 1821 } 1822 1823 sg_set_page(sgl, page, PAGE_SIZE, 0); 1824 } 1825 1826 ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0); 1827 if (ret) 1828 nvkm_gsp_sg_free(device, sgt); 1829 1830 return ret; 1831 } 1832 1833 static void 1834 nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3) 1835 { 1836 for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) 1837 nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]); 1838 } 1839 1840 static int 1841 nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size, 1842 struct nvkm_gsp_radix3 *rx3) 1843 { 1844 u64 addr; 1845 1846 for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) { 1847 u64 *ptes; 1848 int idx; 1849 1850 rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); 1851 rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size, 1852 &rx3->mem[i].addr, GFP_KERNEL); 1853 if (WARN_ON(!rx3->mem[i].data)) 1854 return -ENOMEM; 1855 1856 ptes = rx3->mem[i].data; 1857 if (i == 2) { 1858 struct scatterlist *sgl; 1859 1860 for_each_sgtable_dma_sg(sgt, sgl, idx) { 1861 for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++) 1862 *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j); 1863 } 1864 } else { 1865 for (int j = 0; j < size / GSP_PAGE_SIZE; j++) 1866 *ptes++ = addr + GSP_PAGE_SIZE * j; 1867 } 1868 1869 size = rx3->mem[i].size; 1870 addr = rx3->mem[i].addr; 1871 } 1872 1873 return 0; 1874 } 1875 1876 int 1877 r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) 1878 { 1879 u32 mbox0 = 0xff, mbox1 = 0xff; 1880 int ret; 1881 1882 if (!gsp->running) 1883 return 0; 1884 1885 if (suspend) { 1886 GspFwWprMeta *meta = gsp->wpr_meta.data; 1887 u64 len = meta->gspFwWprEnd - meta->gspFwWprStart; 1888 GspFwSRMeta *sr; 1889 1890 ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt); 1891 if (ret) 1892 return ret; 1893 1894 ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3); 1895 if (ret) 1896 return ret; 1897 1898 ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta); 1899 if (ret) 1900 return ret; 1901 1902 sr = gsp->sr.meta.data; 1903 sr->magic = GSP_FW_SR_META_MAGIC; 1904 sr->revision = GSP_FW_SR_META_REVISION; 1905 sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr; 1906 sr->sizeOfSuspendResumeData = 
len; 1907 1908 mbox0 = lower_32_bits(gsp->sr.meta.addr); 1909 mbox1 = upper_32_bits(gsp->sr.meta.addr); 1910 } 1911 1912 ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); 1913 if (WARN_ON(ret)) 1914 return ret; 1915 1916 nvkm_msec(gsp->subdev.device, 2000, 1917 if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000) 1918 break; 1919 ); 1920 1921 nvkm_falcon_reset(&gsp->falcon); 1922 1923 ret = nvkm_gsp_fwsec_sb(gsp); 1924 WARN_ON(ret); 1925 1926 ret = r535_gsp_booter_unload(gsp, mbox0, mbox1); 1927 WARN_ON(ret); 1928 1929 gsp->running = false; 1930 return 0; 1931 } 1932 1933 int 1934 r535_gsp_init(struct nvkm_gsp *gsp) 1935 { 1936 u32 mbox0, mbox1; 1937 int ret; 1938 1939 if (!gsp->sr.meta.data) { 1940 mbox0 = lower_32_bits(gsp->wpr_meta.addr); 1941 mbox1 = upper_32_bits(gsp->wpr_meta.addr); 1942 } else { 1943 r535_gsp_rmargs_init(gsp, true); 1944 1945 mbox0 = lower_32_bits(gsp->sr.meta.addr); 1946 mbox1 = upper_32_bits(gsp->sr.meta.addr); 1947 } 1948 1949 /* Execute booter to handle (eventually...) booting GSP-RM. */ 1950 ret = r535_gsp_booter_load(gsp, mbox0, mbox1); 1951 if (WARN_ON(ret)) 1952 goto done; 1953 1954 ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE); 1955 if (ret) 1956 goto done; 1957 1958 gsp->running = true; 1959 1960 done: 1961 if (gsp->sr.meta.data) { 1962 nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta); 1963 nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3); 1964 nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); 1965 return ret; 1966 } 1967 1968 if (ret == 0) 1969 ret = r535_gsp_postinit(gsp); 1970 1971 return ret; 1972 } 1973 1974 static int 1975 r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp) 1976 { 1977 const struct firmware *fw = gsp->fws.bl; 1978 const struct nvfw_bin_hdr *hdr; 1979 RM_RISCV_UCODE_DESC *desc; 1980 int ret; 1981 1982 hdr = nvfw_bin_hdr(&gsp->subdev, fw->data); 1983 desc = (void *)fw->data + hdr->header_offset; 1984 1985 ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw); 1986 if (ret) 1987 return ret; 1988 1989 memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size); 1990 1991 gsp->boot.code_offset = desc->monitorCodeOffset; 1992 gsp->boot.data_offset = desc->monitorDataOffset; 1993 gsp->boot.manifest_offset = desc->manifestOffset; 1994 gsp->boot.app_version = desc->appVersion; 1995 return 0; 1996 } 1997 1998 static const struct nvkm_firmware_func 1999 r535_gsp_fw = { 2000 .type = NVKM_FIRMWARE_IMG_SGT, 2001 }; 2002 2003 static int 2004 r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize) 2005 { 2006 const u8 *img = gsp->fws.rm->data; 2007 const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img; 2008 const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff]; 2009 const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset]; 2010 2011 for (int i = 0; i < ehdr->e_shnum; i++, shdr++) { 2012 if (!strcmp(&names[shdr->sh_name], name)) { 2013 *pdata = &img[shdr->sh_offset]; 2014 *psize = shdr->sh_size; 2015 return 0; 2016 } 2017 } 2018 2019 nvkm_error(&gsp->subdev, "section '%s' not found\n", name); 2020 return -ENOENT; 2021 } 2022 2023 static void 2024 r535_gsp_dtor_fws(struct nvkm_gsp *gsp) 2025 { 2026 nvkm_firmware_put(gsp->fws.bl); 2027 gsp->fws.bl = NULL; 2028 nvkm_firmware_put(gsp->fws.booter.unload); 2029 gsp->fws.booter.unload = NULL; 2030 nvkm_firmware_put(gsp->fws.booter.load); 2031 gsp->fws.booter.load = NULL; 2032 nvkm_firmware_put(gsp->fws.rm); 2033 gsp->fws.rm = NULL; 2034 } 2035 2036 void 2037 r535_gsp_dtor(struct nvkm_gsp *gsp) 2038 { 2039 
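	/* Teardown roughly mirrors r535_gsp_oneinit() in reverse: client-id
	 * tracking, radix3/signature/firmware allocations, booter images, then
	 * the cmdq/msgq locks and any remaining firmware handles.
	 */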
static int
r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
{
	const struct firmware *fw = gsp->fws.bl;
	const struct nvfw_bin_hdr *hdr;
	RM_RISCV_UCODE_DESC *desc;
	int ret;

	hdr = nvfw_bin_hdr(&gsp->subdev, fw->data);
	desc = (void *)fw->data + hdr->header_offset;

	ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw);
	if (ret)
		return ret;

	memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size);

	gsp->boot.code_offset = desc->monitorCodeOffset;
	gsp->boot.data_offset = desc->monitorDataOffset;
	gsp->boot.manifest_offset = desc->manifestOffset;
	gsp->boot.app_version = desc->appVersion;
	return 0;
}

static const struct nvkm_firmware_func
r535_gsp_fw = {
	.type = NVKM_FIRMWARE_IMG_SGT,
};

static int
r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize)
{
	const u8 *img = gsp->fws.rm->data;
	const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img;
	const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff];
	const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset];

	for (int i = 0; i < ehdr->e_shnum; i++, shdr++) {
		if (!strcmp(&names[shdr->sh_name], name)) {
			*pdata = &img[shdr->sh_offset];
			*psize = shdr->sh_size;
			return 0;
		}
	}

	nvkm_error(&gsp->subdev, "section '%s' not found\n", name);
	return -ENOENT;
}

static void
r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
{
	nvkm_firmware_put(gsp->fws.bl);
	gsp->fws.bl = NULL;
	nvkm_firmware_put(gsp->fws.booter.unload);
	gsp->fws.booter.unload = NULL;
	nvkm_firmware_put(gsp->fws.booter.load);
	gsp->fws.booter.load = NULL;
	nvkm_firmware_put(gsp->fws.rm);
	gsp->fws.rm = NULL;
}

void
r535_gsp_dtor(struct nvkm_gsp *gsp)
{
	idr_destroy(&gsp->client_id.idr);
	mutex_destroy(&gsp->client_id.mutex);

	nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
	nvkm_gsp_mem_dtor(gsp, &gsp->sig);
	nvkm_firmware_dtor(&gsp->fw);

	nvkm_falcon_fw_dtor(&gsp->booter.unload);
	nvkm_falcon_fw_dtor(&gsp->booter.load);

	mutex_destroy(&gsp->msgq.mutex);
	mutex_destroy(&gsp->cmdq.mutex);

	r535_gsp_dtor_fws(gsp);
}

int
r535_gsp_oneinit(struct nvkm_gsp *gsp)
{
	struct nvkm_device *device = gsp->subdev.device;
	const u8 *data;
	u64 size;
	int ret;

	mutex_init(&gsp->cmdq.mutex);
	mutex_init(&gsp->msgq.mutex);

	ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
				     &device->sec2->falcon, &gsp->booter.load);
	if (ret)
		return ret;

	ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
				     &device->sec2->falcon, &gsp->booter.unload);
	if (ret)
		return ret;

	/* Load GSP firmware from ELF image into DMA-accessible memory. */
	ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
	if (ret)
		return ret;

	ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw);
	if (ret)
		return ret;

	/* Load relevant signature from ELF image. */
	ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size);
	if (ret)
		return ret;

	ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig);
	if (ret)
		return ret;

	memcpy(gsp->sig.data, data, size);

	/* Build radix3 page table for ELF image. */
	ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
	if (ret)
		return ret;

	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
			      r535_gsp_msg_run_cpu_sequencer, gsp);
	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
			      r535_gsp_msg_rc_triggered, gsp);
	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
			      r535_gsp_msg_mmu_fault_queued, gsp);
	r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);

	ret = r535_gsp_rm_boot_ctor(gsp);
	if (ret)
		return ret;

	/* Release FW images - we've copied them to DMA buffers now. */
	r535_gsp_dtor_fws(gsp);

	/* Calculate FB layout. */
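	/*
	 * The regions below are packed top-down beneath the VBIOS image,
	 * roughly (higher FB addresses at the top):
	 *
	 *   +------------------------------+
	 *   | VBIOS                        |
	 *   +------------------------------+
	 *   | FRTS (1MiB)                  | \
	 *   | GSP-RM boot firmware         |  |
	 *   | GSP-RM ELF image             |   > WPR2
	 *   | GSP-RM heap                  |  |
	 *   | space for GspFwWprMeta       | /
	 *   +------------------------------+
	 *   | non-WPR heap (1MiB)          |
	 *   +------------------------------+
	 */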
	gsp->fb.wpr2.frts.size = 0x100000;
	gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;

	gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
	gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);

	gsp->fb.wpr2.elf.size = gsp->fw.len;
	gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);

	{
		u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);

		gsp->fb.wpr2.heap.size =
			gsp->func->wpr_heap.os_carveout_size +
			gsp->func->wpr_heap.base_size +
			ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
			ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);

		gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
	}

	gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
	gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);

	gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
	gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;

	gsp->fb.heap.size = 0x100000;
	gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;

	ret = nvkm_gsp_fwsec_frts(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_libos_init(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_wpr_meta_init(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_rpc_set_system_info(gsp);
	if (WARN_ON(ret))
		return ret;

	ret = r535_gsp_rpc_set_registry(gsp);
	if (WARN_ON(ret))
		return ret;
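
	/*
	 * Hand the libos argument list to the GSP: once the falcon has been
	 * reset into RISC-V mode below, its mailbox registers (0x040/0x044)
	 * are loaded with the DMA address of gsp->libos so that GSP-RM can
	 * locate its init arguments when booter-load (run later from
	 * r535_gsp_init()) starts it.
	 */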
	/* Reset GSP into RISC-V mode. */
	ret = gsp->func->reset(gsp);
	if (WARN_ON(ret))
		return ret;

	nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
	nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));

	mutex_init(&gsp->client_id.mutex);
	idr_init(&gsp->client_id.idr);
	return 0;
}

static int
r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
		 const struct firmware **pfw)
{
	char fwname[64];

	snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
	return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
}

int
r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	int ret;

	if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
		return -EINVAL;

	if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
	    (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
	    (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
	    (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
		r535_gsp_dtor_fws(gsp);
		return ret;
	}

	return 0;
}

#define NVKM_GSP_FIRMWARE(chip) \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin"); \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin"); \
	MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")

NVKM_GSP_FIRMWARE(tu102);
NVKM_GSP_FIRMWARE(tu104);
NVKM_GSP_FIRMWARE(tu106);

NVKM_GSP_FIRMWARE(tu116);
NVKM_GSP_FIRMWARE(tu117);

NVKM_GSP_FIRMWARE(ga100);

NVKM_GSP_FIRMWARE(ga102);
NVKM_GSP_FIRMWARE(ga103);
NVKM_GSP_FIRMWARE(ga104);
NVKM_GSP_FIRMWARE(ga106);
NVKM_GSP_FIRMWARE(ga107);

NVKM_GSP_FIRMWARE(ad102);
NVKM_GSP_FIRMWARE(ad103);
NVKM_GSP_FIRMWARE(ad104);
NVKM_GSP_FIRMWARE(ad106);
NVKM_GSP_FIRMWARE(ad107);