/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <rm/rpc.h>

#include "nvrm/rpcfn.h"

#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)

/**
 * DOC: GSP message queue element
 *
 * https://github.com/NVIDIA/open-gpu-kernel-modules/blob/535/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h
 *
 * The GSP command queue and status queue are message queues used for
 * communication between the driver and GSP. The driver submits GSP RPCs
 * via the command queue, and GSP writes the status of each submitted RPC
 * to the status queue.
 *
 * A GSP message queue element consists of three parts:
 *
 * - message element header (struct r535_gsp_msg), which mostly maintains
 *   the metadata for queuing the element.
 *
 * - RPC message header (struct nvfw_gsp_rpc), which maintains the info
 *   of the RPC, e.g. the RPC function number.
 *
 * - the payload, where the RPC message stays, e.g. the params of a
 *   specific RPC function. Some RPC functions also have their headers
 *   in the payload, e.g. rm_alloc, rm_control.
 *
 * The memory layout of a GSP message element is illustrated below::
 *
 *    +------------------------+
 *    | Message Element Header |
 *    |    (r535_gsp_msg)      |
 *    |                        |
 *    |   (r535_gsp_msg.data)  |
 *    |          |             |
 *    |----------V-------------|
 *    |      GSP RPC Header    |
 *    |     (nvfw_gsp_rpc)     |
 *    |                        |
 *    |  (nvfw_gsp_rpc.data)   |
 *    |          |             |
 *    |----------V-------------|
 *    |       Payload          |
 *    |                        |
 *    |   header(optional)     |
 *    |        params          |
 *    +------------------------+
 *
 * The max size of a message queue element is 16 pages (including the
 * headers). When a GSP message to be sent is larger than 16 pages, the
 * message should be split into multiple elements and sent accordingly.
 *
 * Among the split elements, the first element carries the expected
 * function number, while the rest of the elements are sent with the
 * function number NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD.
 *
 * GSP consumes the elements from the cmdq and always writes the result
 * back to the msgq. The result is also formed as split elements.
 *
 * Terminology:
 *
 * - gsp_msg(msg): GSP message element (element header + GSP RPC header +
 *   payload)
 * - gsp_rpc(rpc): GSP RPC (RPC header + payload)
 * - gsp_rpc_buf: buffer for (GSP RPC header + payload)
 * - gsp_rpc_len: size of (GSP RPC header + payload)
 * - params_size: size of params in the payload
 * - payload_size: size of (header if exists + params) in the payload
 */

struct r535_gsp_msg {
	u8 auth_tag_buffer[16];
	u8 aad_buffer[16];
	u32 checksum;
	u32 sequence;
	u32 elem_count;
	u32 pad;
	u8 data[];
};

struct nvfw_gsp_rpc {
	u32 header_version;
	u32 signature;
	u32 length;
	u32 function;
	u32 rpc_result;
	u32 rpc_result_private;
	u32 sequence;
	union {
		u32 spare;
		u32 cpuRmGfid;
	};
	u8 data[];
};

#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)

#define to_gsp_hdr(p, header) \
	container_of((void *)p, typeof(*header), data)

#define to_payload_hdr(p, header) \
	container_of((void *)p, typeof(*header), params)

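/* Translate a GSP RPC status code (NV_ERR_*) to a Linux errno. */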
int
r535_rpc_status_to_errno(uint32_t rpc_status)
{
	switch (rpc_status) {
	case 0x55: /* NV_ERR_NOT_READY */
	case 0x66: /* NV_ERR_TIMEOUT_RETRY */
		return -EBUSY;
	case 0x51: /* NV_ERR_NO_MEMORY */
		return -ENOMEM;
	default:
		return -EINVAL;
	}
}

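/*
 * Wait until a message of at least @gsp_rpc_len bytes (plus the element
 * header) is available in the msgq.  *@ptime is the remaining polling budget
 * and is decremented on every iteration.
 *
 * Returns the number of pages currently used in the msgq on success,
 * -EINVAL if the request cannot fit in the queue, or -ETIMEDOUT if the
 * polling budget expires.
 */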
static int
r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *ptime)
{
	u32 size, rptr = *gsp->msgq.rptr;
	int used;

	size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + gsp_rpc_len,
			    GSP_PAGE_SIZE);
	if (WARN_ON(!size || size >= gsp->msgq.cnt))
		return -EINVAL;

	do {
		u32 wptr = *gsp->msgq.wptr;

		used = wptr + gsp->msgq.cnt - rptr;
		if (used >= gsp->msgq.cnt)
			used -= gsp->msgq.cnt;
		if (used >= size)
			break;

		usleep_range(1, 2);
	} while (--(*ptime));

	if (WARN_ON(!*ptime))
		return -ETIMEDOUT;

	return used;
}

static struct r535_gsp_msg *
r535_gsp_msgq_get_entry(struct nvkm_gsp *gsp)
{
	u32 rptr = *gsp->msgq.rptr;

	/* Skip the first page, which is the message queue info */
	return (void *)((u8 *)gsp->shm.msgq.ptr + GSP_PAGE_SIZE +
			rptr * GSP_PAGE_SIZE);
}

/**
 * DOC: Receive a GSP message queue element
 *
 * Receiving a GSP message queue element from the message queue consists of
 * the following steps:
 *
 * - Peek the element from the queue: r535_gsp_msgq_peek().
 *   Peek the first page of the element to determine the total size of the
 *   message before allocating the proper memory.
 *
 * - Allocate memory for the message.
 *   Once the total size of the message is determined from the GSP message
 *   queue element, the caller of r535_gsp_msgq_recv() allocates the
 *   required memory.
 *
 * - Receive the message: r535_gsp_msgq_recv().
 *   Copy the message into the allocated memory. Advance the read pointer.
 *   If the message is a large GSP message, r535_gsp_msgq_recv() calls
 *   r535_gsp_msgq_recv_one_elem() repeatedly to receive continuation parts
 *   until the complete message is received.
 *   r535_gsp_msgq_recv() assembles the payloads of the continuation parts
 *   into a single contiguous buffer, which is returned as the large GSP
 *   message.
 *
 * - Free the allocated memory: r535_gsp_msg_done().
 *   The user is responsible for freeing the memory allocated for the GSP
 *   message pages after they have been processed.
 */
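
/*
 * Wait until the next msgq element arrives and return a pointer to the GSP
 * RPC header inside the queue, without copying the element or advancing the
 * read pointer.
 */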
static void *
r535_gsp_msgq_peek(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
{
	struct r535_gsp_msg *mqe;
	int ret;

	ret = r535_gsp_msgq_wait(gsp, gsp_rpc_len, retries);
	if (ret < 0)
		return ERR_PTR(ret);

	mqe = r535_gsp_msgq_get_entry(gsp);

	return mqe->data;
}

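/*
 * Bookkeeping for receiving one msgq element: the remaining retry budget,
 * the expected RPC length, the destination buffer, and whether the element
 * is a continuation of a larger RPC.
 */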
struct r535_gsp_msg_info {
	int *retries;
	u32 gsp_rpc_len;
	void *gsp_rpc_buf;
	bool continuation;
};

static void
r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl);

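/*
 * Copy one msgq element into info->gsp_rpc_buf, handling wrap-around of the
 * ring buffer.  For continuation elements, the element's own RPC header is
 * stripped so that only the payload is copied.  The read pointer is advanced
 * past the element.
 */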
static void *
r535_gsp_msgq_recv_one_elem(struct nvkm_gsp *gsp,
			    struct r535_gsp_msg_info *info)
{
	u8 *buf = info->gsp_rpc_buf;
	u32 rptr = *gsp->msgq.rptr;
	struct r535_gsp_msg *mqe;
	u32 size, expected, len;
	int ret;

	expected = info->gsp_rpc_len;

	ret = r535_gsp_msgq_wait(gsp, expected, info->retries);
	if (ret < 0)
		return ERR_PTR(ret);

	mqe = r535_gsp_msgq_get_entry(gsp);

	if (info->continuation) {
		struct nvfw_gsp_rpc *rpc = (struct nvfw_gsp_rpc *)mqe->data;

		if (rpc->function != NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD) {
			nvkm_error(&gsp->subdev,
				   "Not a continuation of a large RPC\n");
			r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
			return ERR_PTR(-EIO);
		}
	}

	size = ALIGN(expected + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);

	len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
	len = min_t(u32, expected, len);

	if (info->continuation)
		memcpy(buf, mqe->data + sizeof(struct nvfw_gsp_rpc),
		       len - sizeof(struct nvfw_gsp_rpc));
	else
		memcpy(buf, mqe->data, len);

	expected -= len;

	if (expected) {
		mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
		memcpy(buf + len, mqe, expected);
	}

	rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;

	mb();
	(*gsp->msgq.rptr) = rptr;
	return buf;
}

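/*
 * Receive a complete GSP RPC from the msgq.  A buffer large enough for the
 * whole RPC is allocated, the first element is copied into it, and any
 * CONTINUATION_RECORD elements are received and appended until @gsp_rpc_len
 * bytes have been assembled.  The caller frees the result with
 * r535_gsp_msg_done().
 */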
static void *
r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
{
	struct r535_gsp_msg *mqe;
	const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*mqe);
	struct nvfw_gsp_rpc *rpc;
	struct r535_gsp_msg_info info = {0};
	u32 expected = gsp_rpc_len;
	void *buf;

	mqe = r535_gsp_msgq_get_entry(gsp);
	rpc = (struct nvfw_gsp_rpc *)mqe->data;

	if (WARN_ON(rpc->length > max_rpc_size))
		return NULL;

	buf = kvmalloc(max_t(u32, rpc->length, expected), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	info.gsp_rpc_buf = buf;
	info.retries = retries;
	info.gsp_rpc_len = rpc->length;

	buf = r535_gsp_msgq_recv_one_elem(gsp, &info);
	if (IS_ERR(buf)) {
		kvfree(info.gsp_rpc_buf);
		info.gsp_rpc_buf = NULL;
		return buf;
	}

	if (expected <= max_rpc_size)
		return buf;

	info.gsp_rpc_buf += info.gsp_rpc_len;
	expected -= info.gsp_rpc_len;

	while (expected) {
		u32 size;

		rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
		if (IS_ERR_OR_NULL(rpc)) {
			kvfree(buf);
			return rpc;
		}

		info.gsp_rpc_len = rpc->length;
		info.continuation = true;

		rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
		if (IS_ERR_OR_NULL(rpc)) {
			kvfree(buf);
			return rpc;
		}

		size = info.gsp_rpc_len - sizeof(*rpc);
		expected -= size;
		info.gsp_rpc_buf += size;
	}

	rpc = buf;
	rpc->length = gsp_rpc_len;
	return buf;
}

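/*
 * Push a fully-formed GSP message element onto the cmdq: fill in the element
 * header (sequence number, element count, checksum), wait for enough free
 * space, copy the element into the shared queue (wrapping if needed), advance
 * the write pointer, and signal GSP via a write to the falcon's 0xc00
 * register.  The message buffer is freed on return.
 */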
static int
r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *rpc)
{
	struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
	struct r535_gsp_msg *cqe;
	u32 gsp_rpc_len = msg->checksum;
	u64 *ptr = (void *)msg;
	u64 *end;
	u64 csum = 0;
	int free, time = 1000000;
	u32 wptr, size, step, len;
	u32 off = 0;

	len = ALIGN(GSP_MSG_HDR_SIZE + gsp_rpc_len, GSP_PAGE_SIZE);

	end = (u64 *)((char *)ptr + len);
	msg->pad = 0;
	msg->checksum = 0;
	msg->sequence = gsp->cmdq.seq++;
	msg->elem_count = DIV_ROUND_UP(len, 0x1000);

	while (ptr < end)
		csum ^= *ptr++;

	msg->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);

	wptr = *gsp->cmdq.wptr;
	do {
		do {
			free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
			if (free >= gsp->cmdq.cnt)
				free -= gsp->cmdq.cnt;
			if (free >= 1)
				break;

			usleep_range(1, 2);
		} while (--time);

		if (WARN_ON(!time)) {
			kvfree(msg);
			return -ETIMEDOUT;
		}

		cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
		step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
		size = min_t(u32, len, step * GSP_PAGE_SIZE);

		memcpy(cqe, (u8 *)msg + off, size);

		wptr += DIV_ROUND_UP(size, 0x1000);
		if (wptr == gsp->cmdq.cnt)
			wptr = 0;

		off += size;
		len -= size;
	} while (len);

	nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
	wmb();
	(*gsp->cmdq.wptr) = wptr;
	mb();

	nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);

	kvfree(msg);
	return 0;
}

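/*
 * Allocate a cmdq element with room for the element header plus a
 * @gsp_rpc_len byte RPC.  The RPC length is stashed in the checksum field
 * until r535_gsp_cmdq_push() computes the real checksum.  Returns a pointer
 * to the RPC portion (msg->data) of the element.
 */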
static void *
r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 gsp_rpc_len)
{
	struct r535_gsp_msg *msg;
	u32 size = GSP_MSG_HDR_SIZE + gsp_rpc_len;

	size = ALIGN(size, GSP_MSG_MIN_SIZE);
	msg = kvzalloc(size, GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	msg->checksum = gsp_rpc_len;
	return msg->data;
}

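/* Free a GSP RPC received by r535_gsp_msg_recv(). */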
static void
r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
{
	kvfree(msg);
}

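/* Log the RPC header and hex-dump its payload if the debug level allows. */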
static void
r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
{
	if (gsp->subdev.debug >= lvl) {
		nvkm_printk__(&gsp->subdev, lvl, info,
			      "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
			      msg->function, msg->length, msg->length - sizeof(*msg),
			      msg->rpc_result, msg->rpc_result_private);
		print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
			       msg->data, msg->length - sizeof(*msg), true);
	}
}

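/*
 * Receive the reply for RPC function @fn from the msgq.  Messages for other
 * functions that arrive first are dispatched to any registered notification
 * handler (gsp->msgq.ntfy) and the wait is retried.  On success the RPC is
 * returned and must be freed with r535_gsp_msg_done(); when @gsp_rpc_len is
 * zero, or when @fn is zero and the queue drains, NULL is returned.
 */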
struct nvfw_gsp_rpc *
r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 gsp_rpc_len)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	struct nvfw_gsp_rpc *rpc;
	int retries = 4000000, i;

retry:
	rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), &retries);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	rpc = r535_gsp_msgq_recv(gsp, gsp_rpc_len, &retries);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	if (rpc->rpc_result) {
		r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
		r535_gsp_msg_done(gsp, rpc);
		return ERR_PTR(-EINVAL);
	}

	r535_gsp_msg_dump(gsp, rpc, NV_DBG_TRACE);

	if (fn && rpc->function == fn) {
		if (gsp_rpc_len) {
			if (rpc->length < gsp_rpc_len) {
				nvkm_error(subdev, "rpc len %d < %d\n",
					   rpc->length, gsp_rpc_len);
				r535_gsp_msg_dump(gsp, rpc, NV_DBG_ERROR);
				r535_gsp_msg_done(gsp, rpc);
				return ERR_PTR(-EIO);
			}

			return rpc;
		}

		r535_gsp_msg_done(gsp, rpc);
		return NULL;
	}

	for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
		struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];

		if (ntfy->fn == rpc->function) {
			if (ntfy->func)
				ntfy->func(ntfy->priv, ntfy->fn, rpc->data,
					   rpc->length - sizeof(*rpc));
			break;
		}
	}

	if (i == gsp->msgq.ntfy_nr)
		r535_gsp_msg_dump(gsp, rpc, NV_DBG_WARN);

	r535_gsp_msg_done(gsp, rpc);
	if (fn)
		goto retry;

	if (*gsp->msgq.rptr != *gsp->msgq.wptr)
		goto retry;

	return NULL;
}

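/* Register a handler for asynchronous GSP messages with function number @fn. */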
int
r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
{
	int ret = 0;

	mutex_lock(&gsp->msgq.mutex);
	if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
		ret = -ENOSPC;
	} else {
		gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
		gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
		gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
		gsp->msgq.ntfy_nr++;
	}
	mutex_unlock(&gsp->msgq.mutex);
	return ret;
}

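/* Wait for a GSP message with function number @fn and discard its payload. */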
int
r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
{
	void *repv;

	mutex_lock(&gsp->cmdq.mutex);
	repv = r535_gsp_msg_recv(gsp, fn, 0);
	mutex_unlock(&gsp->cmdq.mutex);
	if (IS_ERR(repv))
		return PTR_ERR(repv);

	return 0;
}

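/*
 * Handle the reply of an RPC according to @policy: NOWAIT returns
 * immediately, RECV waits for the reply and returns its payload, and POLL
 * waits for the reply but discards it.
 */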
static void *
r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn,
			  enum nvkm_gsp_rpc_reply_policy policy,
			  u32 gsp_rpc_len)
{
	struct nvfw_gsp_rpc *reply;
	void *repv = NULL;

	switch (policy) {
	case NVKM_GSP_RPC_REPLY_NOWAIT:
		break;
	case NVKM_GSP_RPC_REPLY_RECV:
		reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
		if (!IS_ERR_OR_NULL(reply))
			repv = reply->data;
		else
			repv = reply;
		break;
	case NVKM_GSP_RPC_REPLY_POLL:
		repv = r535_gsp_msg_recv(gsp, fn, 0);
		break;
	}

	return repv;
}

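/*
 * Send a single RPC (one cmdq element) to GSP and handle its reply according
 * to @policy.
 */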
static void *
r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload,
		  enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
{
	struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
	u32 fn = rpc->function;
	int ret;

	if (gsp->subdev.debug >= NV_DBG_TRACE) {
		nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
			   rpc->length, rpc->length - sizeof(*rpc));
		print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
			       rpc->data, rpc->length - sizeof(*rpc), true);
	}

	ret = r535_gsp_cmdq_push(gsp, rpc);
	if (ret)
		return ERR_PTR(ret);

	return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len);
}

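/* Free a reply payload returned by r535_gsp_rpc_push(). */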
static void
r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
{
	struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);

	r535_gsp_msg_done(gsp, rpc);
}

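/*
 * Allocate a new RPC with a @payload_size byte payload for function @fn and
 * fill in the RPC header.  Returns a pointer to the payload (rpc->data).
 */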
static void *
r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size)
{
	struct nvfw_gsp_rpc *rpc;

	rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + payload_size,
					   sizeof(u64)));
	if (IS_ERR(rpc))
		return ERR_CAST(rpc);

	rpc->header_version = 0x03000000;
	rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
	rpc->function = fn;
	rpc->rpc_result = 0xffffffff;
	rpc->rpc_result_private = 0xffffffff;
	rpc->length = sizeof(*rpc) + payload_size;
	return rpc->data;
}

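/*
 * Push an RPC to GSP.  RPCs whose payload exceeds what fits in a single
 * message element are split into an initial RPC followed by
 * NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD RPCs; the reply is then handled
 * according to @policy and sized against the full payload.
 */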
static void *
r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload,
		  enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len)
{
	struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc);
	struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg);
	const u32 max_rpc_size = GSP_MSG_MAX_SIZE - sizeof(*msg);
	const u32 max_payload_size = max_rpc_size - sizeof(*rpc);
	u32 payload_size = rpc->length - sizeof(*rpc);
	void *repv;

	mutex_lock(&gsp->cmdq.mutex);
	if (payload_size > max_payload_size) {
		const u32 fn = rpc->function;
		u32 remain_payload_size = payload_size;
		void *next;

		/* Send initial RPC. */
		next = r535_gsp_rpc_get(gsp, fn, max_payload_size);
		if (IS_ERR(next)) {
			repv = next;
			goto done;
		}

		memcpy(next, payload, max_payload_size);

		repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
		if (IS_ERR(repv))
			goto done;

		payload += max_payload_size;
		remain_payload_size -= max_payload_size;

		/* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
		while (remain_payload_size) {
			u32 size = min(remain_payload_size,
				       max_payload_size);

			next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
			if (IS_ERR(next)) {
				repv = next;
				goto done;
			}

			memcpy(next, payload, size);

			repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
			if (IS_ERR(repv))
				goto done;

			payload += size;
			remain_payload_size -= size;
		}

		/* Wait for reply. */
		repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size +
						 sizeof(*rpc));
		if (!IS_ERR(repv))
			kvfree(msg);
	} else {
		repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len);
	}

done:
	mutex_unlock(&gsp->cmdq.mutex);
	return repv;
}

const struct nvkm_rm_api_rpc
r535_rpc = {
	.get = r535_gsp_rpc_get,
	.push = r535_gsp_rpc_push,
	.done = r535_gsp_rpc_done,
};