/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifdef CONFIG_MSM_OCMEM
# include <mach/ocmem.h>
#endif

#include "a3xx_gpu.h"

#define A3XX_INT0_MASK \
	(A3XX_INT0_RBBM_AHB_ERROR |        \
	 A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
	 A3XX_INT0_CP_T0_PACKET_IN_IB |    \
	 A3XX_INT0_CP_OPCODE_ERROR |       \
	 A3XX_INT0_CP_RESERVED_BIT_ERROR | \
	 A3XX_INT0_CP_HW_FAULT |           \
	 A3XX_INT0_CP_IB1_INT |            \
	 A3XX_INT0_CP_IB2_INT |            \
	 A3XX_INT0_CP_RB_INT |             \
	 A3XX_INT0_CP_REG_PROTECT_FAULT |  \
	 A3XX_INT0_CP_AHB_ERROR_HALT |     \
	 A3XX_INT0_UCHE_OOB_ACCESS)

extern bool hang_debug;

static void a3xx_dump(struct msm_gpu *gpu);
static bool a3xx_idle(struct msm_gpu *gpu);

static bool a3xx_me_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT3(ring, CP_ME_INIT, 17);
	OUT_RING(ring, 0x000003f7);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000080);
	OUT_RING(ring, 0x00000100);
	OUT_RING(ring, 0x00000180);
	OUT_RING(ring, 0x00006600);
	OUT_RING(ring, 0x00000150);
	OUT_RING(ring, 0x0000014e);
	OUT_RING(ring, 0x00000154);
	OUT_RING(ring, 0x00000001);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	gpu->funcs->flush(gpu, ring);
	return a3xx_idle(gpu);
}
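
/*
 * a3xx_hw_init() brings the GPU up from reset: per-SoC VBIF bus-interface
 * setup, error reporting and hang detection, CP register-protection ranges,
 * PM4/PFP microcode load, and finally starting the micro engine via
 * a3xx_me_init().
 */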
static int a3xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
	uint32_t *ptr, len;
	int i, ret;

	DBG("%s", gpu->name);

	if (adreno_is_a305(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
	} else if (adreno_is_a306(adreno_gpu)) {
		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a);
	} else if (adreno_is_a320(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
		/* Enable 1K sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);

	} else if (adreno_is_a330v2(adreno_gpu)) {
		/*
		 * Most of the VBIF registers on 8974v2 have the correct
		 * values at power on, so we won't modify those if we don't
		 * need to
		 */
		/* Enable 1k sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);

	} else if (adreno_is_a330(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
		/* Enable 1K sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
		/* Disable VBIF clock gating. This is to enable AXI running
		 * higher frequency than GPU:
		 */
		gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);

	} else {
		BUG();
	}

	/* Make all blocks contribute to the GPU BUSY perf counter: */
	gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);

	/* Tune the hysteresis counters for SP and CP idle detection: */
	gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
	gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	/* Enable the RBBM error reporting bits.  This lets us get
	 * useful information on failure:
	 */
	gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting: */
	gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);

	/* Turn on the power counters: */
	gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);

	/* Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang:
	 */
	gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);

	/* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
	gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);

	/* Enable Clock gating: */
	if (adreno_is_a306(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
	else if (adreno_is_a320(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
	else if (adreno_is_a330v2(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
	else if (adreno_is_a330(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);

	if (adreno_is_a330v2(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
	else if (adreno_is_a330(adreno_gpu))
		gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);

	/* Set the OCMEM base address for A330, etc */
	if (a3xx_gpu->ocmem_hdl) {
		gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
			(unsigned int)(a3xx_gpu->ocmem_base >> 14));
	}
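
	/*
	 * Note: on a330 the GMEM used by the render backend lives in the
	 * system OCMEM block (allocated in a3xx_gpu_init() below) rather
	 * than in dedicated on-GPU memory, which is why the base address
	 * has to be programmed here.  The >> 14 suggests RB_GMEM_BASE_ADDR
	 * takes the base in 16KB units, but that is inferred, not documented.
	 */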

	/* Turn on performance counters: */
	gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);

	/* Enable the perfcntrs that we use.. */
	for (i = 0; i < gpu->num_perfcntrs; i++) {
		const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
		gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val);
	}

	gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	/* setup access protection: */
	gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);

	/* RBBM registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);

	/* CP registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);

	/* RB registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);

	/* VBIF registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);

	/* NOTE: PM4/micro-engine firmware registers look to be the same
	 * for a2xx and a3xx.. we could possibly push that part down to
	 * adreno_gpu base class.  Or push both PM4 and PFP but
	 * parameterize the pfp ucode addr/data registers..
	 */
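
	/*
	 * Both microcode images are loaded the same way: write the start
	 * offset to the RAM/UCODE address register once, then stream the
	 * words into the matching data register, which auto-increments the
	 * write address.  The first dword of each image is skipped (i = 1);
	 * it appears to hold a size/version header rather than instructions.
	 */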

	/* Load PM4: */
	ptr = (uint32_t *)(adreno_gpu->pm4->data);
	len = adreno_gpu->pm4->size / 4;
	DBG("loading PM4 ucode version: %x", ptr[1]);

	gpu_write(gpu, REG_AXXX_CP_DEBUG,
			AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
			AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
	gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);

	/* Load PFP: */
	ptr = (uint32_t *)(adreno_gpu->pfp->data);
	len = adreno_gpu->pfp->size / 4;
	DBG("loading PFP ucode version: %x", ptr[5]);

	gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);

	/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
	if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) ||
			adreno_is_a320(adreno_gpu)) {
		gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
	} else if (adreno_is_a330(adreno_gpu)) {
		/* NOTE: this (value taken from the downstream android driver)
		 * includes some bits outside of the known bitfields.  But
		 * A330 has this "MERCIU queue" thing too, which might
		 * explain a new bitfield or reshuffling:
		 */
		gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
	}

	/* clear ME_HALT to start micro engine */
	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
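
	/*
	 * With ME_HALT cleared the micro engine starts executing, so the
	 * last step is to feed it the CP_ME_INIT packet via a3xx_me_init()
	 * and wait for the ring to drain and the GPU to go idle again.
	 */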
	return a3xx_me_init(gpu) ? 0 : -EINVAL;
}

static void a3xx_recover(struct msm_gpu *gpu)
{
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
	}

	/* dump registers before resetting gpu, if enabled: */
	if (hang_debug)
		a3xx_dump(gpu);

	gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
	adreno_recover(gpu);
}

static void a3xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	adreno_gpu_cleanup(adreno_gpu);

#ifdef CONFIG_MSM_OCMEM
	if (a3xx_gpu->ocmem_base)
		ocmem_free(OCMEM_GRAPHICS, a3xx_gpu->ocmem_hdl);
#endif

	kfree(a3xx_gpu);
}
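
/*
 * a3xx_idle(): wait for the ringbuffer to drain (adreno_idle()) and then
 * poll RBBM_STATUS until the GPU_BUSY bit clears.  Returns false on
 * timeout so callers can treat the GPU as hung.
 */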
static bool a3xx_idle(struct msm_gpu *gpu)
{
	/* wait for ringbuffer to drain: */
	if (!adreno_idle(gpu, gpu->rb[0]))
		return false;

	/* then wait for GPU to finish: */
	if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
			A3XX_RBBM_STATUS_GPU_BUSY))) {
		DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);

		/* TODO maybe we need to reset GPU here to recover from hang? */
		return false;
	}

	return true;
}

static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
{
	uint32_t status;

	status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
	DBG("%s: %08x", gpu->name, status);

	// TODO

	gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);

	msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

static const unsigned int a3xx_registers[] = {
	0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
	0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
	0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
	0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
	0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
	0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
	0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
	0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
	0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
	0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
	0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
	0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
	0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
	0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
	0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
	0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
	0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
	0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
	0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
	0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
	0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
	0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
	0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
	0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
	0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
	0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
	0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
	0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
	0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
	0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
	0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
	0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
	0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
	0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
	0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
	0x303c, 0x303c, 0x305e, 0x305f,
	~0 /* sentinel */
};

#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "status: %08x\n",
			gpu_read(gpu, REG_A3XX_RBBM_STATUS));
	adreno_show(gpu, m);
}
#endif

/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a3xx_dump(struct msm_gpu *gpu)
{
	printk("status: %08x\n",
			gpu_read(gpu, REG_A3XX_RBBM_STATUS));
	adreno_dump(gpu);
}

/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
};

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a3xx_hw_init,
		.pm_suspend = msm_gpu_pm_suspend,
		.pm_resume = msm_gpu_pm_resume,
		.recover = a3xx_recover,
		.submit = adreno_submit,
		.flush = adreno_flush,
		.active_ring = adreno_active_ring,
		.irq = a3xx_irq,
		.destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
		.show = a3xx_show,
#endif
	},
};

static const struct msm_gpu_perfcntr perfcntrs[] = {
	{ REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
			SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
	{ REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO,
			SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
};
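
/*
 * a3xx_gpu_init() is the probe-time constructor: it allocates the a3xx_gpu
 * wrapper, points the core at the funcs/registers/perfcntrs tables above,
 * initializes the common adreno/msm_gpu state with a single ringbuffer,
 * and (on a330, when CONFIG_MSM_OCMEM is enabled) carves GMEM out of OCMEM.
 */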
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
	struct a3xx_gpu *a3xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	int ret;

	if (!pdev) {
		dev_err(dev->dev, "no a3xx device\n");
		ret = -ENXIO;
		goto fail;
	}

	a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
	if (!a3xx_gpu) {
		ret = -ENOMEM;
		goto fail;
	}

	adreno_gpu = &a3xx_gpu->base;
	gpu = &adreno_gpu->base;

	gpu->perfcntrs = perfcntrs;
	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);

	adreno_gpu->registers = a3xx_registers;
	adreno_gpu->reg_offsets = a3xx_register_offsets;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
	if (ret)
		goto fail;

	/* if needed, allocate gmem: */
	if (adreno_is_a330(adreno_gpu)) {
#ifdef CONFIG_MSM_OCMEM
		/* TODO this is different/missing upstream: */
		struct ocmem_buf *ocmem_hdl =
				ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);

		a3xx_gpu->ocmem_hdl = ocmem_hdl;
		a3xx_gpu->ocmem_base = ocmem_hdl->addr;
		adreno_gpu->gmem = ocmem_hdl->len;
		DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
				a3xx_gpu->ocmem_base);
#endif
	}

	if (!gpu->aspace) {
		/* TODO we think it is possible to configure the GPU to
		 * restrict access to VRAM carveout.  But the required
		 * registers are unknown.  For now just bail out and
		 * limp along with just modesetting.  If it turns out
		 * to not be possible to restrict access, then we must
		 * implement a cmdstream validator.
		 */
		dev_err(dev->dev, "No memory protection without IOMMU\n");
		ret = -ENXIO;
		goto fail;
	}

	return gpu;

fail:
	if (a3xx_gpu)
		a3xx_destroy(&a3xx_gpu->base.base);

	return ERR_PTR(ret);
}