/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "sid.h"
#include "si_ih.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);

static void si_ih_enable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	adev->irq.ih.enabled = true;
}

static void si_ih_disable_interrupts(struct amdgpu_device *adev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	adev->irq.ih.enabled = false;
	adev->irq.ih.rptr = 0;
}

static int si_ih_irq_init(struct amdgpu_device *adev)
{
	struct amdgpu_ih_ring *ih = &adev->irq.ih;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	si_ih_disable_interrupts(adev);
	/* set dummy read address to dummy page address */
	WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);

	ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
		     IH_WPTR_OVERFLOW_CLEAR |
		     (rb_bufsz << 1) |
		     IH_WPTR_WRITEBACK_ENABLE;

	WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	if (adev->irq.msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	pci_set_master(adev->pdev);
	si_ih_enable_interrupts(adev);

	return 0;
}

static void si_ih_irq_disable(struct amdgpu_device *adev)
{
	si_ih_disable_interrupts(adev);
	mdelay(1);
}

static u32 si_ih_get_wptr(struct amdgpu_device *adev,
			  struct amdgpu_ih_ring *ih)
{
	u32 wptr, tmp;

	wptr = le32_to_cpu(*ih->wptr_cpu);

	if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
		wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
		ih->rptr = (wptr + 16) & ih->ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);

		/* Unset the CLEAR_OVERFLOW bit immediately so new overflows
		 * can be detected.
		 */
		tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & ih->ptr_mask);
}

static void si_ih_decode_iv(struct amdgpu_device *adev,
			    struct amdgpu_ih_ring *ih,
			    struct amdgpu_iv_entry *entry)
{
	u32 ring_index = ih->rptr >> 2;
	uint32_t dw[4];

	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);

	entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
	entry->src_id = dw[0] & 0xff;
	entry->src_data[0] = dw[1] & 0xfffffff;
	entry->ring_id = dw[2] & 0xff;
	entry->vmid = (dw[2] >> 8) & 0xff;

	ih->rptr += 16;
}

static void si_ih_set_rptr(struct amdgpu_device *adev,
			   struct amdgpu_ih_ring *ih)
{
	WREG32(IH_RB_RPTR, ih->rptr);
}

static int si_ih_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	si_ih_set_interrupt_funcs(adev);

	return 0;
}

static int si_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
	if (r)
		return r;

	return amdgpu_irq_init(adev);
}

static int si_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_irq_fini_sw(adev);

	return 0;
}

static int si_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return si_ih_irq_init(adev);
}

static int si_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
	si_ih_irq_disable(ip_block->adev);

	return 0;
}

static int si_ih_suspend(struct amdgpu_ip_block *ip_block)
{
	return si_ih_hw_fini(ip_block);
}

static int si_ih_resume(struct amdgpu_ip_block *ip_block)
{
	return si_ih_hw_init(ip_block);
}

static bool si_ih_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		return false;

	return true;
}

static int si_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	unsigned i;
	struct amdgpu_device *adev = ip_block->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_ih_is_idle(adev))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

static int si_ih_set_clockgating_state(void *handle,
				       enum amd_clockgating_state state)
{
	return 0;
}

static int si_ih_set_powergating_state(void *handle,
				       enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs si_ih_ip_funcs = {
	.name = "si_ih",
	.early_init = si_ih_early_init,
	.sw_init = si_ih_sw_init,
	.sw_fini = si_ih_sw_fini,
	.hw_init = si_ih_hw_init,
	.hw_fini = si_ih_hw_fini,
	.suspend = si_ih_suspend,
	.resume = si_ih_resume,
	.is_idle = si_ih_is_idle,
	.wait_for_idle = si_ih_wait_for_idle,
	.soft_reset = si_ih_soft_reset,
	.set_clockgating_state = si_ih_set_clockgating_state,
	.set_powergating_state = si_ih_set_powergating_state,
};

static const struct amdgpu_ih_funcs si_ih_funcs = {
	.get_wptr = si_ih_get_wptr,
	.decode_iv = si_ih_decode_iv,
	.set_rptr = si_ih_set_rptr
};

static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
	adev->irq.ih_funcs = &si_ih_funcs;
}

const struct amdgpu_ip_block_version si_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_ih_ip_funcs,
};