xref: /linux/drivers/gpu/drm/msm/adreno/a3xx_gpu.c (revision 91b74e9761d785f41156383ad5ec7c437dfb2960)
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifdef CONFIG_MSM_OCMEM
#  include <mach/ocmem.h>
#endif

#include "a3xx_gpu.h"

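/*
 * Interrupt sources unmasked at hw_init time: the RBBM/CP error conditions
 * plus the CP ringbuffer and IB interrupts serviced by a3xx_irq().
 */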
#define A3XX_INT0_MASK \
	(A3XX_INT0_RBBM_AHB_ERROR |        \
	 A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
	 A3XX_INT0_CP_T0_PACKET_IN_IB |    \
	 A3XX_INT0_CP_OPCODE_ERROR |       \
	 A3XX_INT0_CP_RESERVED_BIT_ERROR | \
	 A3XX_INT0_CP_HW_FAULT |           \
	 A3XX_INT0_CP_IB1_INT |            \
	 A3XX_INT0_CP_IB2_INT |            \
	 A3XX_INT0_CP_RB_INT |             \
	 A3XX_INT0_CP_REG_PROTECT_FAULT |  \
	 A3XX_INT0_CP_AHB_ERROR_HALT |     \
	 A3XX_INT0_UCHE_OOB_ACCESS)

extern bool hang_debug;

static void a3xx_dump(struct msm_gpu *gpu);

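/*
 * Issue the 17-dword CP_ME_INIT packet on the ringbuffer to initialize the
 * CP micro-engine, then flush and wait for the GPU to go idle.
 */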
static void a3xx_me_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb;

	OUT_PKT3(ring, CP_ME_INIT, 17);
	OUT_RING(ring, 0x000003f7);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000080);
	OUT_RING(ring, 0x00000100);
	OUT_RING(ring, 0x00000180);
	OUT_RING(ring, 0x00006600);
	OUT_RING(ring, 0x00000150);
	OUT_RING(ring, 0x0000014e);
	OUT_RING(ring, 0x00000154);
	OUT_RING(ring, 0x00000001);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	gpu->funcs->flush(gpu);
	gpu->funcs->idle(gpu);
}

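/*
 * Bring the hardware up: per-variant VBIF programming, error reporting and
 * performance counter setup, PM4/PFP ucode load, and CP micro-engine start.
 */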
static int a3xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
	uint32_t *ptr, len;
	int i, ret;

	DBG("%s", gpu->name);

	if (adreno_is_a305(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);

	} else if (adreno_is_a320(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
		/* Enable 1K sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);

	} else if (adreno_is_a330v2(adreno_gpu)) {
		/*
		 * Most of the VBIF registers on 8974v2 have the correct
		 * values at power on, so we won't modify those if we don't
		 * need to
		 */
		/* Enable 1k sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);

	} else if (adreno_is_a330(adreno_gpu)) {
		/* Set up 24 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
		/* Enable 1K sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
		/* Disable VBIF clock gating.  This is to allow AXI to run at
		 * a higher frequency than the GPU:
		 */
		gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);

	} else {
		BUG();
	}

	/* Make all blocks contribute to the GPU BUSY perf counter: */
	gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);

	/* Tune the hysteresis counters for SP and CP idle detection: */
	gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
	gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	/* Enable the RBBM error reporting bits.  This lets us get
	 * useful information on failure:
	 */
	gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting: */
	gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);

	/* Turn on the power counters: */
	gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);

	/* Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang:
	 */
	gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);

	/* Enable 64-byte cacheline size. HW default is 32-byte (0x000000E0): */
	gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);

	/* Enable clock gating: */
	if (adreno_is_a320(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
	else if (adreno_is_a330v2(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
	else if (adreno_is_a330(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);

	if (adreno_is_a330v2(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
	else if (adreno_is_a330(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);

	/* Set the OCMEM base address for A330, etc. */
	if (a3xx_gpu->ocmem_hdl) {
		gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
			(unsigned int)(a3xx_gpu->ocmem_base >> 14));
	}

	/* Turn on performance counters: */
	gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);

	/* Enable the perfcntrs that we use: */
	for (i = 0; i < gpu->num_perfcntrs; i++) {
		const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
		gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val);
	}

	gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	/* Set up access protection: */
	gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);

	/* RBBM registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);

	/* CP registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);

	/* RB registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);

	/* VBIF registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);

	/* NOTE: PM4/micro-engine firmware registers look to be the same
	 * for a2xx and a3xx; we could possibly push that part down to the
	 * adreno_gpu base class, or push both PM4 and PFP but
	 * parameterize the PFP ucode addr/data registers.
	 */

	/* Load PM4: */
	ptr = (uint32_t *)(adreno_gpu->pm4->data);
	len = adreno_gpu->pm4->size / 4;
	DBG("loading PM4 ucode version: %x", ptr[1]);

	gpu_write(gpu, REG_AXXX_CP_DEBUG,
			AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
			AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
	gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);

	/* Load PFP: */
	ptr = (uint32_t *)(adreno_gpu->pfp->data);
	len = adreno_gpu->pfp->size / 4;
	DBG("loading PFP ucode version: %x", ptr[5]);

	gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);

	/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
	if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) {
		gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
	} else if (adreno_is_a330(adreno_gpu)) {
		/* NOTE: this value (taken from the downstream android driver)
		 * includes some bits outside of the known bitfields.  But
		 * A330 has this "MERCIU queue" thing too, which might
		 * explain a new bitfield or reshuffling:
		 */
		gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
	}

	/* Clear ME_HALT to start the micro-engine: */
	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);

	a3xx_me_init(gpu);

	return 0;
}

static void a3xx_recover(struct msm_gpu *gpu)
{
	/* dump registers before resetting gpu, if enabled: */
	if (hang_debug)
		a3xx_dump(gpu);
	gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
	adreno_recover(gpu);
}

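/*
 * Free everything allocated by a3xx_gpu_init(): common adreno state, the
 * OCMEM allocation (if any), and the a3xx_gpu struct itself.
 */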
static void a3xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	adreno_gpu_cleanup(adreno_gpu);

#ifdef CONFIG_MSM_OCMEM
	if (a3xx_gpu->ocmem_base)
		ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
#endif

	kfree(a3xx_gpu);
}

static void a3xx_idle(struct msm_gpu *gpu)
{
	/* wait for ringbuffer to drain: */
	adreno_idle(gpu);

	/* then wait for GPU to finish: */
	if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
			A3XX_RBBM_STATUS_GPU_BUSY)))
		DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);

	/* TODO maybe we need to reset GPU here to recover from hang? */
}

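/* Read the RBBM interrupt status, ack it, and kick retire processing. */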
static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
{
	uint32_t status;

	status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
	DBG("%s: %08x", gpu->name, status);

	// TODO

	gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);

	msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

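/*
 * Ranges of registers (inclusive start/end pairs) captured by adreno_show()
 * and adreno_dump(), terminated by the ~0 sentinel.
 */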
static const unsigned int a3xx_registers[] = {
	0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
	0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
	0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
	0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
	0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
	0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
	0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
	0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
	0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
	0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
	0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
	0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
	0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
	0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
	0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
	0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
	0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
	0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
	0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
	0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
	0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
	0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
	0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
	0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
	0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
	0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
	0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
	0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
	0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
	0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
	0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
	0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
	0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
	0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
	0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
	0x303c, 0x303c, 0x305e, 0x305f,
	~0   /* sentinel */
};

#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
	gpu->funcs->pm_resume(gpu);
	seq_printf(m, "status:   %08x\n",
			gpu_read(gpu, REG_A3XX_RBBM_STATUS));
	gpu->funcs->pm_suspend(gpu);
	adreno_show(gpu, m);
}
#endif

/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a3xx_dump(struct msm_gpu *gpu)
{
	printk("status:   %08x\n",
			gpu_read(gpu, REG_A3XX_RBBM_STATUS));
	adreno_dump(gpu);
}

/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
			REG_A3XX_CP_PFP_UCODE_DATA),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
			REG_A3XX_CP_PFP_UCODE_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
			REG_A3XX_CP_PROTECT_STATUS),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
			REG_A3XX_RBBM_PERFCTR_CTL),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
			REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
			REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
			REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
			REG_A3XX_RBBM_INT_0_STATUS),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
			REG_A3XX_RBBM_AHB_ERROR_STATUS),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
			REG_A3XX_RBBM_INT_CLEAR_CMD),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
			REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
			REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
	REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
			REG_A3XX_VSC_SIZE_ADDRESS),
	REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
	REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
			REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
			REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
			REG_A3XX_SP_VS_OBJ_START_REG),
	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
			REG_A3XX_SP_FS_OBJ_START_REG),
	REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
			REG_A3XX_RBBM_PM_OVERRIDE2),
	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
	REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
			REG_A3XX_SQ_GPR_MANAGEMENT),
	REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
			REG_A3XX_SQ_INST_STORE_MANAGMENT),
	REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
			REG_A3XX_RBBM_SW_RESET_CMD),
	REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
			REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
			REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
			REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a3xx_hw_init,
		.pm_suspend = msm_gpu_pm_suspend,
		.pm_resume = msm_gpu_pm_resume,
		.recover = a3xx_recover,
		.last_fence = adreno_last_fence,
		.submit = adreno_submit,
		.flush = adreno_flush,
		.idle = a3xx_idle,
		.irq = a3xx_irq,
		.destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
		.show = a3xx_show,
#endif
	},
};

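/*
 * Counters exposed through the msm_gpu perfcntr interface: a3xx_hw_init()
 * programs each select register with the given value, and readings are taken
 * from the listed RBBM_PERFCTR_SP_n_LO registers.
 */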
static const struct msm_gpu_perfcntr perfcntrs[] = {
	{ REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
			SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
	{ REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO,
			SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
};

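/*
 * Construct the a3xx GPU: allocate the wrapper, hook up the register tables
 * and perf counters, run the common adreno init, and (for a330) claim OCMEM
 * to back GMEM.
 */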
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
	struct a3xx_gpu *a3xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	int ret;

	if (!pdev) {
		dev_err(dev->dev, "no a3xx device\n");
		ret = -ENXIO;
		goto fail;
	}

	a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
	if (!a3xx_gpu) {
		ret = -ENOMEM;
		goto fail;
	}

	adreno_gpu = &a3xx_gpu->base;
	gpu = &adreno_gpu->base;

	a3xx_gpu->pdev = pdev;

	gpu->perfcntrs = perfcntrs;
	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);

	adreno_gpu->registers = a3xx_registers;
	adreno_gpu->reg_offsets = a3xx_register_offsets;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
	if (ret)
		goto fail;

	/* if needed, allocate gmem: */
	if (adreno_is_a330(adreno_gpu)) {
#ifdef CONFIG_MSM_OCMEM
		/* TODO this is different/missing upstream: */
		struct ocmem_buf *ocmem_hdl =
				ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);

		a3xx_gpu->ocmem_hdl = ocmem_hdl;
		a3xx_gpu->ocmem_base = ocmem_hdl->addr;
		adreno_gpu->gmem = ocmem_hdl->len;
		DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
				a3xx_gpu->ocmem_base);
#endif
	}

	if (!gpu->mmu) {
		/* TODO we think it is possible to configure the GPU to
		 * restrict access to VRAM carveout.  But the required
		 * registers are unknown.  For now just bail out and
		 * limp along with just modesetting.  If it turns out
		 * to not be possible to restrict access, then we must
		 * implement a cmdstream validator.
		 */
		dev_err(dev->dev, "No memory protection without IOMMU\n");
		ret = -ENXIO;
		goto fail;
	}

	return gpu;

fail:
	if (a3xx_gpu)
		a3xx_destroy(&a3xx_gpu->base.base);

	return ERR_PTR(ret);
}