// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"

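/*
 * Map HFI message ids to their names; this table is used only for error
 * reporting below.
 */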
#define HFI_MSG_ID(val) [val] = #val

static const char * const a6xx_hfi_msg_id[] = {
	HFI_MSG_ID(HFI_H2F_MSG_INIT),
	HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
	HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_TEST),
	HFI_MSG_ID(HFI_H2F_MSG_START),
	HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
	HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
	HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};

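/*
 * Read the next message from an HFI queue into 'data' (at most 'dwords'
 * dwords).  Returns the message size in dwords, or 0 if the queue is
 * empty.
 */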
static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	if (header->read_index == header->write_index) {
		header->rx_request = 1;
		return 0;
	}

	hdr = queue->data[index];

	queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

	/*
	 * Assuming the GMU firmware is a rational actor that never sends us a
	 * larger response than we expect, an unexpectedly large header size
	 * must mean memory corruption and/or hardware failure. In that case
	 * the only reasonable course of action is to BUG() so the failure is
	 * caught immediately instead of overflowing the caller's buffer.
	 */

	BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		index = (index + 1) % header->size;
	}

	if (!gmu->legacy)
		index = ALIGN(index, 4) % header->size;

	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}

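/*
 * Write a message to an HFI queue and ring the GMU doorbell so it knows
 * to look.  Returns -ENOSPC if the queue does not have room for 'dwords'
 * dwords.
 */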
static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, space, index = header->write_index;

	spin_lock(&queue->lock);

	space = CIRC_SPACE(header->write_index, header->read_index,
		header->size);
	if (space < dwords) {
		header->dropped++;
		spin_unlock(&queue->lock);
		return -ENOSPC;
	}

	queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

	for (i = 0; i < dwords; i++) {
		queue->data[index] = data[i];
		index = (index + 1) % header->size;
	}

	/* Pad the message out to a 4-dword boundary with a cookie value */
	if (!gmu->legacy) {
		for (; index % 4; index = (index + 1) % header->size)
			queue->data[index] = 0xfafafafa;
	}

	header->write_index = index;
	spin_unlock(&queue->lock);

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
	return 0;
}

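/*
 * Poll for the GMU to raise the message-queue interrupt, which signals
 * that a response is available, then clear the interrupt.
 */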
static int a6xx_hfi_wait_for_msg_interrupt(struct a6xx_gmu *gmu, u32 id, u32 seqnum)
{
	int ret;
	u32 val;

	/* Wait for a response */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev,
			"Message %s id %d timed out waiting for response\n",
			a6xx_hfi_msg_id[id], seqnum);
		return -ETIMEDOUT;
	}

	/* Clear the interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

	return 0;
}

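/*
 * Wait for the response to the message identified by 'seqnum', logging
 * and skipping firmware errors and stale responses along the way.  Up to
 * 'payload_size' bytes of the response payload are copied to 'payload'.
 */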
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
		u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	int ret;

	ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
	if (ret)
		return ret;

	for (;;) {
		struct a6xx_hfi_msg_response resp;

		/* Get the next packet */
		ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
			sizeof(resp) >> 2);

		/*
		 * If the queue is empty, there may have been previous missed
		 * responses that preceded the response to our packet. Wait
		 * further before we give up.
		 */
		if (!ret) {
			ret = a6xx_hfi_wait_for_msg_interrupt(gmu, id, seqnum);
			if (ret) {
				DRM_DEV_ERROR(gmu->dev,
					"The HFI response queue is unexpectedly empty\n");
				return ret;
			}
			continue;
		}

		if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
			struct a6xx_hfi_msg_error *error =
				(struct a6xx_hfi_msg_error *) &resp;

			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
				error->code);
			continue;
		}

		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
			DRM_DEV_ERROR(gmu->dev,
				"Unexpected message seqnum %d on the response queue\n",
				HFI_HEADER_SEQNUM(resp.ret_header));
			continue;
		}

		if (resp.error) {
			DRM_DEV_ERROR(gmu->dev,
				"Message %s id %d returned error %d\n",
				a6xx_hfi_msg_id[id], seqnum, resp.error);
			return -EINVAL;
		}

		/* All is well, copy over the buffer */
		if (payload && payload_size)
			memcpy(payload, resp.payload,
				min_t(u32, payload_size, sizeof(resp.payload)));

		return 0;
	}
}

static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
		void *data, u32 size, u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
	int ret, dwords = size >> 2;
	u32 seqnum;

	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

	/*
	 * The first dword of the message is the header: the message id lives
	 * in bits [7:0], the size in dwords in [15:8], the message type
	 * (HFI_MSG_CMD) in [19:16] and the sequence number in [31:20].
	 */
	*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
		(dwords << 8) | id;

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
			a6xx_hfi_msg_id[id], seqnum);
		return ret;
	}

	return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}

static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
	struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

	msg.dbg_buffer_addr = (u32) gmu->debug.iova;
	msg.dbg_buffer_size = (u32) gmu->debug.size;
	msg.boot_state = boot_state;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
	struct a6xx_hfi_msg_fw_version msg = { 0 };

	/* Currently supporting version 1.10 */
	msg.supported_version = (1 << 28) | (1 << 19) | (1 << 17);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
		version, sizeof(*version));
}

static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].acd = 0xffffffff;
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

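/*
 * Build a bandwidth table from the BCM addresses published in cmd-db and
 * the per-level vote values previously computed in gmu->gpu_ib_votes.
 */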
static void a6xx_generate_bw_table(const struct a6xx_info *info, struct a6xx_gmu *gmu,
				   struct a6xx_hfi_msg_bw_table *msg)
{
	unsigned int i, j;

	for (i = 0; i < GMU_MAX_BCMS; i++) {
		if (!info->bcms[i].name)
			break;
		msg->ddr_cmds_addrs[i] = cmd_db_read_addr(info->bcms[i].name);
	}
	msg->ddr_cmds_num = i;

	for (i = 0; i < gmu->nr_gpu_bws; ++i)
		for (j = 0; j < msg->ddr_cmds_num; j++)
			msg->ddr_cmds_data[i][j] = gmu->gpu_ib_votes[i][j];
	msg->bw_level_num = gmu->nr_gpu_bws;

	/* Compute the wait bitmask with each BCM having the commit bit */
	msg->ddr_wait_bitmask = 0;
	for (j = 0; j < msg->ddr_cmds_num; j++)
		if (msg->ddr_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
			msg->ddr_wait_bitmask |= BIT(j);

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU.
	 * The 'CN0' BCM is used on all targets, and the votes are basically
	 * 'off' and 'on' states, with the first bit enabling the path.
	 */

	msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
	msg->cnoc_cmds_num = 1;

	msg->cnoc_cmds_data[0][0] = BCM_TCS_CMD(true, false, 0, 0);
	msg->cnoc_cmds_data[1][0] = BCM_TCS_CMD(true, true, 0, BIT(0));

	/* Compute the wait bitmask with each BCM having the commit bit */
	msg->cnoc_wait_bitmask = 0;
	for (j = 0; j < msg->cnoc_cmds_num; j++)
		if (msg->cnoc_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
			msg->cnoc_wait_bitmask |= BIT(j);
}

static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5007c;
	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void a619_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 13;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x0;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x50080;

	msg->ddr_cmds_data[0][0]  = 0x40000000;
	msg->ddr_cmds_data[0][1]  = 0x40000000;
	msg->ddr_cmds_data[0][2]  = 0x40000000;
	msg->ddr_cmds_data[1][0]  = 0x6000030c;
	msg->ddr_cmds_data[1][1]  = 0x600000db;
	msg->ddr_cmds_data[1][2]  = 0x60000008;
	msg->ddr_cmds_data[2][0]  = 0x60000618;
	msg->ddr_cmds_data[2][1]  = 0x600001b6;
	msg->ddr_cmds_data[2][2]  = 0x60000008;
	msg->ddr_cmds_data[3][0]  = 0x60000925;
	msg->ddr_cmds_data[3][1]  = 0x60000291;
	msg->ddr_cmds_data[3][2]  = 0x60000008;
	msg->ddr_cmds_data[4][0]  = 0x60000dc1;
	msg->ddr_cmds_data[4][1]  = 0x600003dc;
	msg->ddr_cmds_data[4][2]  = 0x60000008;
	msg->ddr_cmds_data[5][0]  = 0x600010ad;
	msg->ddr_cmds_data[5][1]  = 0x600004ae;
	msg->ddr_cmds_data[5][2]  = 0x60000008;
	msg->ddr_cmds_data[6][0]  = 0x600014c3;
	msg->ddr_cmds_data[6][1]  = 0x600005d4;
	msg->ddr_cmds_data[6][2]  = 0x60000008;
	msg->ddr_cmds_data[7][0]  = 0x6000176a;
	msg->ddr_cmds_data[7][1]  = 0x60000693;
	msg->ddr_cmds_data[7][2]  = 0x60000008;
	msg->ddr_cmds_data[8][0]  = 0x60001f01;
	msg->ddr_cmds_data[8][1]  = 0x600008b5;
	msg->ddr_cmds_data[8][2]  = 0x60000008;
	msg->ddr_cmds_data[9][0]  = 0x60002940;
	msg->ddr_cmds_data[9][1]  = 0x60000b95;
	msg->ddr_cmds_data[9][2]  = 0x60000008;
	msg->ddr_cmds_data[10][0] = 0x60002f68;
	msg->ddr_cmds_data[10][1] = 0x60000d50;
	msg->ddr_cmds_data[10][2] = 0x60000008;
	msg->ddr_cmds_data[11][0] = 0x60003700;
	msg->ddr_cmds_data[11][1] = 0x60000f71;
	msg->ddr_cmds_data[11][2] = 0x60000008;
	msg->ddr_cmds_data[12][0] = 0x60003fce;
	msg->ddr_cmds_data[12][1] = 0x600011ea;
	msg->ddr_cmds_data[12][2] = 0x60000008;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x0;

	msg->cnoc_cmds_addrs[0] = 0x50054;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
}

static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[0][1] =  0x00000000;
	msg->cnoc_cmds_data[0][2] =  0x40000000;

	msg->cnoc_cmds_data[1][0] =  0x60000001;
	msg->cnoc_cmds_data[1][1] =  0x20000001;
	msg->cnoc_cmds_data[1][2] =  0x60000001;
}

static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x5007c;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x500a4;
	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void a690_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x50000;
	msg->ddr_cmds_addrs[2] = 0x500ac;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5003c;
	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x500a0;
	msg->ddr_cmds_addrs[2] = 0x50000;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50070;
	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void a663_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x50000;
	msg->ddr_cmds_addrs[2] = 0x500b4;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50058;
	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x50000;
	msg->ddr_cmds_addrs[2] = 0x50088;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5006c;
	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[1][0] =  0x60000001;
}

static void a730_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 12;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x7;

	msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
	msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
	msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;
	msg->ddr_cmds_data[1][0] = 0x600002e8;
	msg->ddr_cmds_data[1][1] = 0x600003d0;
	msg->ddr_cmds_data[1][2] = 0x60000008;
	msg->ddr_cmds_data[2][0] = 0x6000068d;
	msg->ddr_cmds_data[2][1] = 0x6000089a;
	msg->ddr_cmds_data[2][2] = 0x60000008;
	msg->ddr_cmds_data[3][0] = 0x600007f2;
	msg->ddr_cmds_data[3][1] = 0x60000a6e;
	msg->ddr_cmds_data[3][2] = 0x60000008;
	msg->ddr_cmds_data[4][0] = 0x600009e5;
	msg->ddr_cmds_data[4][1] = 0x60000cfd;
	msg->ddr_cmds_data[4][2] = 0x60000008;
	msg->ddr_cmds_data[5][0] = 0x60000b29;
	msg->ddr_cmds_data[5][1] = 0x60000ea6;
	msg->ddr_cmds_data[5][2] = 0x60000008;
	msg->ddr_cmds_data[6][0] = 0x60001698;
	msg->ddr_cmds_data[6][1] = 0x60001da8;
	msg->ddr_cmds_data[6][2] = 0x60000008;
	msg->ddr_cmds_data[7][0] = 0x600018d2;
	msg->ddr_cmds_data[7][1] = 0x60002093;
	msg->ddr_cmds_data[7][2] = 0x60000008;
	msg->ddr_cmds_data[8][0] = 0x60001e66;
	msg->ddr_cmds_data[8][1] = 0x600027e6;
	msg->ddr_cmds_data[8][2] = 0x60000008;
	msg->ddr_cmds_data[9][0] = 0x600027c2;
	msg->ddr_cmds_data[9][1] = 0x6000342f;
	msg->ddr_cmds_data[9][2] = 0x60000008;
	msg->ddr_cmds_data[10][0] = 0x60002e71;
	msg->ddr_cmds_data[10][1] = 0x60003cf5;
	msg->ddr_cmds_data[10][2] = 0x60000008;
	msg->ddr_cmds_data[11][0] = 0x600030ae;
	msg->ddr_cmds_data[11][1] = 0x60003fe5;
	msg->ddr_cmds_data[11][2] = 0x60000008;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x1;

	msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a740_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x7;

	msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
	msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
	msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/* TODO: add a proper dvfs table */

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x1;

	msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5005c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] =  0x40000000;
	msg->ddr_cmds_data[0][1] =  0x40000000;
	msg->ddr_cmds_data[0][2] =  0x40000000;

	/*
	 * These are the CX (CNOC) votes.  They are used by the GMU, but the
	 * values for the sdm845 GMU are known and fixed, so we can hard-code
	 * them.
	 */

	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x05;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] =  0x40000000;
	msg->cnoc_cmds_data[0][1] =  0x00000000;
	msg->cnoc_cmds_data[0][2] =  0x40000000;

	msg->cnoc_cmds_data[1][0] =  0x60000001;
	msg->cnoc_cmds_data[1][1] =  0x20000001;
	msg->cnoc_cmds_data[1][2] =  0x60000001;
}
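/*
 * The bandwidth table is built on first use and then cached in
 * gmu->bw_table.  Targets that describe their BCMs in the a6xx_info get
 * a table generated from cmd-db; the rest fall back to the fixed per-SoC
 * tables above.
 */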
static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table *msg;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	const struct a6xx_info *info = adreno_gpu->info->a6xx;

	if (gmu->bw_table)
		goto send;

	msg = devm_kzalloc(gmu->dev, sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	if (info->bcms && gmu->nr_gpu_bws > 1)
		a6xx_generate_bw_table(info, gmu, msg);
	else if (adreno_is_a618(adreno_gpu))
		a618_build_bw_table(msg);
	else if (adreno_is_a619(adreno_gpu))
		a619_build_bw_table(msg);
	else if (adreno_is_a640_family(adreno_gpu))
		a640_build_bw_table(msg);
	else if (adreno_is_a650(adreno_gpu))
		a650_build_bw_table(msg);
	else if (adreno_is_7c3(adreno_gpu))
		adreno_7c3_build_bw_table(msg);
	else if (adreno_is_a660(adreno_gpu))
		a660_build_bw_table(msg);
	else if (adreno_is_a663(adreno_gpu))
		a663_build_bw_table(msg);
	else if (adreno_is_a690(adreno_gpu))
		a690_build_bw_table(msg);
	else if (adreno_is_a730(adreno_gpu))
		a730_build_bw_table(msg);
	else if (adreno_is_a740_family(adreno_gpu))
		a740_build_bw_table(msg);
	else
		a6xx_build_bw_table(msg);

	gmu->bw_table = msg;

send:
	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, gmu->bw_table, sizeof(*(gmu->bw_table)),
		NULL, 0);
}

#define HFI_FEATURE_ACD 12

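/*
 * Enable the ACD (adaptive clock distribution) feature and send the
 * table of per-level enable flags, but only if the platform provided an
 * ACD table.
 */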
static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table;
	struct a6xx_hfi_msg_feature_ctrl msg = {
		.feature = HFI_FEATURE_ACD,
		.enable = 1,
		.data = 0,
	};
	int ret;

	if (!acd_table->enable_by_level)
		return 0;

	/* Enable ACD feature at GMU */
	ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret);
		return ret;
	}

	/* Send ACD table to GMU */
	ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_ACD, acd_table, sizeof(*acd_table), NULL, 0);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send ACD table (%d)\n", ret);
		return ret;
	}

	return 0;
}

static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_test msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_core_fw_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
		sizeof(msg), NULL, 0);
}

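/*
 * Vote for a GPU frequency / bus bandwidth level.  The indices refer to
 * entries in the perf and bandwidth tables that were sent to the GMU
 * earlier.
 */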
int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 freq_index, u32 bw_index)
{
	struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };

	msg.ack_type = 1; /* blocking */
	msg.freq = freq_index;
	msg.bw = bw_index;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
		sizeof(msg), NULL, 0);
}

int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_prep_slumber_cmd msg = { 0 };

	/* TODO: should freq and bw fields be non-zero? */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
		sizeof(msg), NULL, 0);
}

static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	/*
	 * We have to exchange version numbers per the sequence, but at this
	 * point the kernel driver doesn't need to know the exact version of
	 * the GMU firmware
	 */

	ret = a6xx_hfi_send_perf_table_v1(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until next
	 * boot
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}

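/*
 * Send the HFI messages the GMU needs before it can take over GPU power
 * management: the perf (DCVS) table, the bandwidth table, the optional
 * ACD table and finally the start messages.
 */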
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	if (gmu->legacy)
		return a6xx_hfi_start_v1(gmu, boot_state);
	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_enable_acd(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_core_fw_start(gmu);
	if (ret)
		return ret;

	/*
	 * The downstream driver sends this in its "a6xx_hw_init" equivalent,
	 * but there seems to be no harm in sending it here
	 */
	ret = a6xx_hfi_send_start(gmu);
	if (ret)
		return ret;

	return 0;
}

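/*
 * Reset the queue indices when the GMU goes down; a queue that still has
 * unread messages at this point is reported as an error.
 */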
void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		if (!queue->header)
			continue;

		if (queue->header->read_index != queue->header->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

		queue->header->read_index = 0;
		queue->header->write_index = 0;

		memset(&queue->history, 0xff, sizeof(queue->history));
		queue->history_idx = 0;
	}
}

static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
		struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
		u32 id)
{
	spin_lock_init(&queue->lock);
	queue->header = header;
	queue->data = virt;
	atomic_set(&queue->seqnum, 0);

	memset(&queue->history, 0xff, sizeof(queue->history));
	queue->history_idx = 0;

	/* Set up the shared memory header */
	header->iova = iova;
	header->type = 10 << 8 | id;
	header->status = 1;
	header->size = SZ_4K >> 2;
	header->msg_size = 0;
	header->dropped = 0;
	header->rx_watermark = 1;
	header->tx_watermark = 1;
	header->rx_request = 1;
	header->tx_request = 0;
	header->read_index = 0;
	header->write_index = 0;
}

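/*
 * Lay out the HFI region: the queue table header and the per-queue
 * headers live in the first page, and each queue then gets a 4K page of
 * its own.
 */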
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *hfi = &gmu->hfi;
	struct a6xx_hfi_queue_table_header *table = hfi->virt;
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset;
	int table_size;

	/*
	 * The table size is the size of the table header plus all of the queue
	 * headers
	 */
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	/* First queue header is located immediately after the table header */
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

	/* Command queue */
	offset = SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

	/* GMU response queue */
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, gmu->legacy ? 4 : 1);
}
998