// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"

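/*
 * Map each host-to-firmware (H2F) message ID to its name so the error
 * messages below can print a readable identifier instead of a raw number.
 */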
#define HFI_MSG_ID(val) [val] = #val

static const char * const a6xx_hfi_msg_id[] = {
	HFI_MSG_ID(HFI_H2F_MSG_INIT),
	HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
	HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_TEST),
	HFI_MSG_ID(HFI_H2F_MSG_START),
	HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
	HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
	HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};

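/*
 * Read one packet from a shared-memory HFI queue into 'data'. Returns the
 * packet size in dwords, or 0 if the queue is empty, in which case rx_request
 * is set (presumably to ask the GMU to interrupt us on the next message).
 */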
static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	if (header->read_index == header->write_index) {
		header->rx_request = 1;
		return 0;
	}

	hdr = queue->data[index];

	queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

	/*
	 * If we are to assume that the GMU firmware is in fact a rational actor
	 * and is programmed to not send us a larger response than we expect
	 * then we can also assume that if the header size is unexpectedly large
	 * that it is due to memory corruption and/or hardware failure. In this
	 * case the only reasonable course of action is to BUG() to help harden
	 * the failure.
	 */

	BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		index = (index + 1) % header->size;
	}

	if (!gmu->legacy)
		index = ALIGN(index, 4) % header->size;

	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}

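/*
 * Write 'dwords' words into a shared-memory HFI queue under the queue lock,
 * pad up to a 4-dword boundary on non-legacy GMUs, then ring the host-to-GMU
 * interrupt. Returns -ENOSPC if the ring does not have enough room.
 */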
static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, space, index = header->write_index;

	spin_lock(&queue->lock);

	space = CIRC_SPACE(header->write_index, header->read_index,
		header->size);
	if (space < dwords) {
		header->dropped++;
		spin_unlock(&queue->lock);
		return -ENOSPC;
	}

	queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

	for (i = 0; i < dwords; i++) {
		queue->data[index] = data[i];
		index = (index + 1) % header->size;
	}

	/* Cookify any unused data at the end of the write buffer */
	if (!gmu->legacy) {
		for (; index % 4; index = (index + 1) % header->size)
			queue->data[index] = 0xfafafafa;
	}

	header->write_index = index;
	spin_unlock(&queue->lock);

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
	return 0;
}

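/*
 * Wait for the GMU to signal the message-queue interrupt, then drain the
 * response queue until the ack matching 'seqnum' is found. F2H error
 * messages and stale responses encountered along the way are logged and
 * skipped.
 */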
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
	u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	u32 val;
	int ret;

	/* Wait for a response */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev,
			"Message %s id %d timed out waiting for response\n",
			a6xx_hfi_msg_id[id], seqnum);
		return -ETIMEDOUT;
	}

	/* Clear the interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

	for (;;) {
		struct a6xx_hfi_msg_response resp;

		/* Get the next packet */
		ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
			sizeof(resp) >> 2);

		/* If the queue is empty our response never made it */
		if (!ret) {
			DRM_DEV_ERROR(gmu->dev,
				"The HFI response queue is unexpectedly empty\n");

			return -ENOENT;
		}

		if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
			struct a6xx_hfi_msg_error *error =
				(struct a6xx_hfi_msg_error *) &resp;

			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
				error->code);
			continue;
		}

		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
			DRM_DEV_ERROR(gmu->dev,
				"Unexpected message id %d on the response queue\n",
				HFI_HEADER_SEQNUM(resp.ret_header));
			continue;
		}

		if (resp.error) {
			DRM_DEV_ERROR(gmu->dev,
				"Message %s id %d returned error %d\n",
				a6xx_hfi_msg_id[id], seqnum, resp.error);
			return -EINVAL;
		}

		/* All is well, copy over the buffer */
		if (payload && payload_size)
			memcpy(payload, resp.payload,
				min_t(u32, payload_size, sizeof(resp.payload)));

		return 0;
	}
}

static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
	void *data, u32 size, u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
	int ret, dwords = size >> 2;
	u32 seqnum;

	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

	/* First dword of the message is the message header - fill it in */
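	/*
	 * Header layout, low to high bits: [7:0] message id, [15:8] size in
	 * dwords, [19:16] message type (HFI_MSG_CMD), [31:20] sequence number.
	 */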
	*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
		(dwords << 8) | id;

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
			a6xx_hfi_msg_id[id], seqnum);
		return ret;
	}

	return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}

static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
	struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

	msg.dbg_buffer_addr = (u32) gmu->debug.iova;
	msg.dbg_buffer_size = (u32) gmu->debug.size;
	msg.boot_state = boot_state;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
	struct a6xx_hfi_msg_fw_version msg = { 0 };

	/* Currently supporting version 1.10 */
	msg.supported_version = (1 << 28) | (1 << 19) | (1 << 17);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
		version, sizeof(*version));
}

static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].acd = 0xffffffff;
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

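/*
 * Build the DDR bandwidth vote table from data discovered at runtime: the BCM
 * addresses come from the RPMh command database and the per-level vote values
 * come from the interconnect bandwidth votes cached in the GMU structure.
 */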
static void a6xx_generate_bw_table(const struct a6xx_info *info, struct a6xx_gmu *gmu,
	struct a6xx_hfi_msg_bw_table *msg)
{
	unsigned int i, j;

	for (i = 0; i < GMU_MAX_BCMS; i++) {
		if (!info->bcms[i].name)
			break;
		msg->ddr_cmds_addrs[i] = cmd_db_read_addr(info->bcms[i].name);
	}
	msg->ddr_cmds_num = i;

	for (i = 0; i < gmu->nr_gpu_bws; ++i)
		for (j = 0; j < msg->ddr_cmds_num; j++)
			msg->ddr_cmds_data[i][j] = gmu->gpu_ib_votes[i][j];
	msg->bw_level_num = gmu->nr_gpu_bws;

	/* Compute the wait bitmask with each BCM having the commit bit */
	msg->ddr_wait_bitmask = 0;
	for (j = 0; j < msg->ddr_cmds_num; j++)
		if (msg->ddr_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
			msg->ddr_wait_bitmask |= BIT(j);

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU.
	 * The 'CN0' BCM is used on all targets, and the votes are basically
	 * 'off' and 'on' states, with the first bit enabling the path.
	 */

	msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
	msg->cnoc_cmds_num = 1;

	msg->cnoc_cmds_data[0][0] = BCM_TCS_CMD(true, false, 0, 0);
	msg->cnoc_cmds_data[1][0] = BCM_TCS_CMD(true, true, 0, BIT(0));

	/* Compute the wait bitmask with each BCM having the commit bit */
	msg->cnoc_wait_bitmask = 0;
	for (j = 0; j < msg->cnoc_cmds_num; j++)
		if (msg->cnoc_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
			msg->cnoc_wait_bitmask |= BIT(j);
}

static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5007c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a619_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 13;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x0;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x50080;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;
	msg->ddr_cmds_data[1][0] = 0x6000030c;
	msg->ddr_cmds_data[1][1] = 0x600000db;
	msg->ddr_cmds_data[1][2] = 0x60000008;
	msg->ddr_cmds_data[2][0] = 0x60000618;
	msg->ddr_cmds_data[2][1] = 0x600001b6;
	msg->ddr_cmds_data[2][2] = 0x60000008;
	msg->ddr_cmds_data[3][0] = 0x60000925;
	msg->ddr_cmds_data[3][1] = 0x60000291;
	msg->ddr_cmds_data[3][2] = 0x60000008;
	msg->ddr_cmds_data[4][0] = 0x60000dc1;
	msg->ddr_cmds_data[4][1] = 0x600003dc;
	msg->ddr_cmds_data[4][2] = 0x60000008;
	msg->ddr_cmds_data[5][0] = 0x600010ad;
	msg->ddr_cmds_data[5][1] = 0x600004ae;
	msg->ddr_cmds_data[5][2] = 0x60000008;
	msg->ddr_cmds_data[6][0] = 0x600014c3;
	msg->ddr_cmds_data[6][1] = 0x600005d4;
	msg->ddr_cmds_data[6][2] = 0x60000008;
	msg->ddr_cmds_data[7][0] = 0x6000176a;
	msg->ddr_cmds_data[7][1] = 0x60000693;
	msg->ddr_cmds_data[7][2] = 0x60000008;
	msg->ddr_cmds_data[8][0] = 0x60001f01;
	msg->ddr_cmds_data[8][1] = 0x600008b5;
	msg->ddr_cmds_data[8][2] = 0x60000008;
	msg->ddr_cmds_data[9][0] = 0x60002940;
	msg->ddr_cmds_data[9][1] = 0x60000b95;
	msg->ddr_cmds_data[9][2] = 0x60000008;
	msg->ddr_cmds_data[10][0] = 0x60002f68;
	msg->ddr_cmds_data[10][1] = 0x60000d50;
	msg->ddr_cmds_data[10][2] = 0x60000008;
	msg->ddr_cmds_data[11][0] = 0x60003700;
	msg->ddr_cmds_data[11][1] = 0x60000f71;
	msg->ddr_cmds_data[11][2] = 0x60000008;
	msg->ddr_cmds_data[12][0] = 0x60003fce;
	msg->ddr_cmds_data[12][1] = 0x600011ea;
	msg->ddr_cmds_data[12][2] = 0x60000008;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x0;

	msg->cnoc_cmds_addrs[0] = 0x50054;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
}

static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[0][1] = 0x00000000;
	msg->cnoc_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_data[1][0] = 0x60000001;
	msg->cnoc_cmds_data[1][1] = 0x20000001;
	msg->cnoc_cmds_data[1][2] = 0x60000001;
}

static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x5007c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x500a4;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a690_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x50000;
	msg->ddr_cmds_addrs[2] = 0x500ac;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5003c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x500a0;
	msg->ddr_cmds_addrs[2] = 0x50000;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50070;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a663_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x50000;
	msg->ddr_cmds_addrs[2] = 0x500b4;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50058;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x50000;
	msg->ddr_cmds_addrs[2] = 0x50088;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5006c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a730_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 12;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x7;

	msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
	msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
	msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;
	msg->ddr_cmds_data[1][0] = 0x600002e8;
	msg->ddr_cmds_data[1][1] = 0x600003d0;
	msg->ddr_cmds_data[1][2] = 0x60000008;
	msg->ddr_cmds_data[2][0] = 0x6000068d;
	msg->ddr_cmds_data[2][1] = 0x6000089a;
	msg->ddr_cmds_data[2][2] = 0x60000008;
	msg->ddr_cmds_data[3][0] = 0x600007f2;
	msg->ddr_cmds_data[3][1] = 0x60000a6e;
	msg->ddr_cmds_data[3][2] = 0x60000008;
	msg->ddr_cmds_data[4][0] = 0x600009e5;
	msg->ddr_cmds_data[4][1] = 0x60000cfd;
	msg->ddr_cmds_data[4][2] = 0x60000008;
	msg->ddr_cmds_data[5][0] = 0x60000b29;
	msg->ddr_cmds_data[5][1] = 0x60000ea6;
	msg->ddr_cmds_data[5][2] = 0x60000008;
	msg->ddr_cmds_data[6][0] = 0x60001698;
	msg->ddr_cmds_data[6][1] = 0x60001da8;
	msg->ddr_cmds_data[6][2] = 0x60000008;
	msg->ddr_cmds_data[7][0] = 0x600018d2;
	msg->ddr_cmds_data[7][1] = 0x60002093;
	msg->ddr_cmds_data[7][2] = 0x60000008;
	msg->ddr_cmds_data[8][0] = 0x60001e66;
	msg->ddr_cmds_data[8][1] = 0x600027e6;
	msg->ddr_cmds_data[8][2] = 0x60000008;
	msg->ddr_cmds_data[9][0] = 0x600027c2;
	msg->ddr_cmds_data[9][1] = 0x6000342f;
	msg->ddr_cmds_data[9][2] = 0x60000008;
	msg->ddr_cmds_data[10][0] = 0x60002e71;
	msg->ddr_cmds_data[10][1] = 0x60003cf5;
	msg->ddr_cmds_data[10][2] = 0x60000008;
	msg->ddr_cmds_data[11][0] = 0x600030ae;
	msg->ddr_cmds_data[11][1] = 0x60003fe5;
	msg->ddr_cmds_data[11][2] = 0x60000008;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x1;

	msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a740_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x7;

	msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
	msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
	msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/* TODO: add a proper dvfs table */

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x1;

	msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5005c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes. This is used but the values for the
	 * sdm845 GMU are known and fixed so we can hard code them.
	 */

	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x05;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[0][1] = 0x00000000;
	msg->cnoc_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_data[1][0] = 0x60000001;
	msg->cnoc_cmds_data[1][1] = 0x20000001;
	msg->cnoc_cmds_data[1][2] = 0x60000001;
}

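/*
 * Send the bandwidth vote table. The table is built only once, cached in the
 * devm-allocated gmu->bw_table, and re-sent as-is on every subsequent call.
 */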
static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table *msg;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	const struct a6xx_info *info = adreno_gpu->info->a6xx;

	if (gmu->bw_table)
		goto send;

	msg = devm_kzalloc(gmu->dev, sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	if (info->bcms && gmu->nr_gpu_bws > 1)
		a6xx_generate_bw_table(info, gmu, msg);
	else if (adreno_is_a618(adreno_gpu))
		a618_build_bw_table(msg);
	else if (adreno_is_a619(adreno_gpu))
		a619_build_bw_table(msg);
	else if (adreno_is_a640_family(adreno_gpu))
		a640_build_bw_table(msg);
	else if (adreno_is_a650(adreno_gpu))
		a650_build_bw_table(msg);
	else if (adreno_is_7c3(adreno_gpu))
		adreno_7c3_build_bw_table(msg);
	else if (adreno_is_a660(adreno_gpu))
		a660_build_bw_table(msg);
	else if (adreno_is_a663(adreno_gpu))
		a663_build_bw_table(msg);
	else if (adreno_is_a690(adreno_gpu))
		a690_build_bw_table(msg);
	else if (adreno_is_a730(adreno_gpu))
		a730_build_bw_table(msg);
	else if (adreno_is_a740_family(adreno_gpu))
		a740_build_bw_table(msg);
	else
		a6xx_build_bw_table(msg);

	gmu->bw_table = msg;

send:
	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, gmu->bw_table, sizeof(*(gmu->bw_table)),
		NULL, 0);
}

static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_test msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_core_fw_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
		sizeof(msg), NULL, 0);
}

int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 freq_index, u32 bw_index)
{
	struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };

	msg.ack_type = 1; /* blocking */
	msg.freq = freq_index;
	msg.bw = bw_index;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
		sizeof(msg), NULL, 0);
}

int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_prep_slumber_cmd msg = { 0 };

	/* TODO: should freq and bw fields be non-zero ? */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
		sizeof(msg), NULL, 0);
}

static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	/*
	 * We have to exchange version numbers per the sequence, but at this
	 * point the kernel driver doesn't need to know the exact version of
	 * the GMU firmware
	 */

	ret = a6xx_hfi_send_perf_table_v1(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until the
	 * next boot
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}

int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	if (gmu->legacy)
		return a6xx_hfi_start_v1(gmu, boot_state);

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_core_fw_start(gmu);
	if (ret)
		return ret;

	/*
	 * The downstream driver sends this in its "a6xx_hw_init" equivalent,
	 * but there seems to be no harm in sending it here
	 */
	ret = a6xx_hfi_send_start(gmu);
	if (ret)
		return ret;

	return 0;
}

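/*
 * Reset both queues at GMU shutdown: warn if either still holds unread
 * messages, rewind the read/write indices, and clear the debug history.
 */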
void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		if (!queue->header)
			continue;

		if (queue->header->read_index != queue->header->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

		queue->header->read_index = 0;
		queue->header->write_index = 0;

		memset(&queue->history, 0xff, sizeof(queue->history));
		queue->history_idx = 0;
	}
}

static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
	struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
	u32 id)
{
	spin_lock_init(&queue->lock);
	queue->header = header;
	queue->data = virt;
	atomic_set(&queue->seqnum, 0);

	memset(&queue->history, 0xff, sizeof(queue->history));
	queue->history_idx = 0;

	/* Set up the shared memory header */
	header->iova = iova;
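	/*
	 * The upper byte (10) appears to be a fixed queue-type value expected
	 * by the GMU firmware; the low byte carries the queue id.
	 */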
	header->type = 10 << 8 | id;
	header->status = 1;
	header->size = SZ_4K >> 2;
	header->msg_size = 0;
	header->dropped = 0;
	header->rx_watermark = 1;
	header->tx_watermark = 1;
	header->rx_request = 1;
	header->tx_request = 0;
	header->read_index = 0;
	header->write_index = 0;
}

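/*
 * Lay out the HFI shared memory: the queue table header and the per-queue
 * headers live in the first page of the buffer, the command queue data starts
 * at the 4K offset and the response queue data at the 8K offset.
 */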
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *hfi = &gmu->hfi;
	struct a6xx_hfi_queue_table_header *table = hfi->virt;
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset;
	int table_size;

	/*
	 * The table size is the size of the table header plus all of the queue
	 * headers
	 */
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	/* First queue header is located immediately after the table header */
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

	/* Command queue */
	offset = SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

	/* GMU response queue */
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, gmu->legacy ? 4 : 1);
}