/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/mmu.h>
#include <engine/fifo.h>

#include <nvif/class.h>
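
/* Layout of one fault buffer entry (entry_size == 32 bytes), as decoded
 * by gv100_fault_buffer_process() below; the field names mirror that
 * decode rather than any official documentation:
 *
 *   0x00 instlo / 0x04 insthi - faulting channel's instance block address
 *   0x08 addrlo / 0x0c addrhi - faulting virtual address
 *   0x10 timelo / 0x14 timehi - fault timestamp
 *   0x18 info0 - bits 7:0 engine
 *   0x1c info1 - bit 31 valid, bits 28:24 gpc, bit 20 hub,
 *                bits 19:16 access, bits 14:8 client, bits 4:0 reason
 */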

/* Worker that drains fault buffer 0; scheduled from gv100_fault_ntfy_nrpfb(). */
void
gv100_fault_buffer_process(struct work_struct *work)
{
	struct nvkm_fault *fault = container_of(work, typeof(*fault), nrpfb_work);
	struct nvkm_fault_buffer *buffer = fault->buffer[0];
	struct nvkm_device *device = fault->subdev.device;
	struct nvkm_memory *mem = buffer->mem;
	u32 get = nvkm_rd32(device, buffer->get);
	u32 put = nvkm_rd32(device, buffer->put);
	if (put == get)
		return;

	nvkm_kmap(mem);
	while (get != put) {
		const u32 base = get * buffer->fault->func->buffer.entry_size;
		const u32 instlo = nvkm_ro32(mem, base + 0x00);
		const u32 insthi = nvkm_ro32(mem, base + 0x04);
		const u32 addrlo = nvkm_ro32(mem, base + 0x08);
		const u32 addrhi = nvkm_ro32(mem, base + 0x0c);
		const u32 timelo = nvkm_ro32(mem, base + 0x10);
		const u32 timehi = nvkm_ro32(mem, base + 0x14);
		const u32 info0 = nvkm_ro32(mem, base + 0x18);
		const u32 info1 = nvkm_ro32(mem, base + 0x1c);
		struct nvkm_fault_data info;

		/* Consume the entry: advance GET, wrapping at the end of
		 * the ring, and publish it back to the hardware.
		 */
		if (++get == buffer->entries)
			get = 0;
		nvkm_wr32(device, buffer->get, get);

		info.addr = ((u64)addrhi << 32) | addrlo;
		info.inst = ((u64)insthi << 32) | instlo;
		info.time = ((u64)timehi << 32) | timelo;
		info.engine = (info0 & 0x000000ff);
		info.valid = (info1 & 0x80000000) >> 31;
		info.gpc = (info1 & 0x1f000000) >> 24;
		info.hub = (info1 & 0x00100000) >> 20;
		info.access = (info1 & 0x000f0000) >> 16;
		info.client = (info1 & 0x00007f00) >> 8;
		info.reason = (info1 & 0x0000001f);

		nvkm_fifo_fault(device->fifo, &info);
	}
	nvkm_done(mem);
}

/* Per-buffer interrupt (en/dis)able: bit 29 covers buffer 0, bit 27
 * buffer 1; 0x100a2c appears to be the interrupt-enable set register
 * and 0x100a34 the matching clear register.
 */
static void
gv100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
	if (enable)
		nvkm_mask(device, 0x100a2c, intr, intr);
	else
		nvkm_mask(device, 0x100a34, intr, intr);
}

static void
gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x14;

	/* Clear the enable bit (31) for this buffer. */
	nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x00000000);
}

static void
gv100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x14;

	/* Disable the buffer while (re)programming its address, then
	 * set the enable bit (31) again.
	 */
	nvkm_mask(device, 0x100e34 + foff, 0xc0000000, 0x40000000);
	nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->addr));
	nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->addr));
	nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x80000000);
}

static void
gv100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
	struct nvkm_device *device = buffer->fault->subdev.device;
	const u32 foff = buffer->id * 0x14;

	nvkm_mask(device, 0x100e34 + foff, 0x40000000, 0x40000000);

	/* The entry count lives in the low 20 bits of 0x100e34; GET/PUT
	 * are recorded as register offsets for later polling.
	 */
	buffer->entries = nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
	buffer->get = 0x100e2c + foff;
	buffer->put = 0x100e30 + foff;
}

static int
gv100_fault_ntfy_nrpfb(struct nvkm_event_ntfy *ntfy, u32 bits)
{
	struct nvkm_fault *fault = container_of(ntfy, typeof(*fault), nrpfb);

	/* Defer buffer processing to process context; keep the notify armed. */
	schedule_work(&fault->nrpfb_work);
	return NVKM_EVENT_KEEP;
}

/* Decode a fault reported directly through registers rather than a
 * buffer; info0 carries both the engine (bits 7:0) and the low bits
 * of the instance block address (bits 31:12).
 */
static void
gv100_fault_intr_fault(struct nvkm_fault *fault)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fault_data info;
	const u32 addrlo = nvkm_rd32(device, 0x100e4c);
	const u32 addrhi = nvkm_rd32(device, 0x100e50);
	const u32 info0 = nvkm_rd32(device, 0x100e54);
	const u32 insthi = nvkm_rd32(device, 0x100e58);
	const u32 info1 = nvkm_rd32(device, 0x100e5c);

	info.addr = ((u64)addrhi << 32) | addrlo;
	info.inst = ((u64)insthi << 32) | (info0 & 0xfffff000);
	info.time = 0;
	info.engine = (info0 & 0x000000ff);
	info.valid = (info1 & 0x80000000) >> 31;
	info.gpc = (info1 & 0x1f000000) >> 24;
	info.hub = (info1 & 0x00100000) >> 20;
	info.access = (info1 & 0x000f0000) >> 16;
	info.client = (info1 & 0x00007f00) >> 8;
	info.reason = (info1 & 0x0000001f);

	nvkm_fifo_fault(device->fifo, &info);
}

static void
gv100_fault_intr(struct nvkm_fault *fault)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x100a20);

	if (stat & 0x80000000) {
		/* Register-reported fault; acknowledged by writing bit 31
		 * back to 0x100e60.
		 */
		gv100_fault_intr_fault(fault);
		nvkm_wr32(device, 0x100e60, 0x80000000);
		stat &= ~0x80000000;
	}

	if (stat & 0x20000000) {
		/* Fault buffer 0 (non-replayable) has pending entries. */
		if (fault->buffer[0]) {
			nvkm_event_ntfy(&fault->event, 0, NVKM_FAULT_BUFFER_EVENT_PENDING);
			stat &= ~0x20000000;
		}
	}

	if (stat & 0x08000000) {
		/* Fault buffer 1 (replayable) has pending entries. */
		if (fault->buffer[1]) {
			nvkm_event_ntfy(&fault->event, 1, NVKM_FAULT_BUFFER_EVENT_PENDING);
			stat &= ~0x08000000;
		}
	}

	if (stat) {
		nvkm_debug(subdev, "intr %08x\n", stat);
	}
}

static void
gv100_fault_fini(struct nvkm_fault *fault)
{
	/* Stop the worker before disabling the buffer and masking the
	 * top-level fault interrupt (bit 31).
	 */
	nvkm_event_ntfy_block(&fault->nrpfb);
	flush_work(&fault->nrpfb_work);

	if (fault->buffer[0])
		fault->func->buffer.fini(fault->buffer[0]);

	nvkm_mask(fault->subdev.device, 0x100a34, 0x80000000, 0x80000000);
}
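
/* Bring-up is the reverse of gv100_fault_fini(): unmask the top-level
 * fault interrupt, enable fault buffer 0, then allow the notify that
 * schedules nrpfb_work.
 */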
static void
gv100_fault_init(struct nvkm_fault *fault)
{
	nvkm_mask(fault->subdev.device, 0x100a2c, 0x80000000, 0x80000000);
	fault->func->buffer.init(fault->buffer[0]);
	nvkm_event_ntfy_allow(&fault->nrpfb);
}

int
gv100_fault_oneinit(struct nvkm_fault *fault)
{
	/* Route buffer 0's pending events to the nrpfb handler. */
	nvkm_event_ntfy_add(&fault->event, 0, NVKM_FAULT_BUFFER_EVENT_PENDING, true,
			    gv100_fault_ntfy_nrpfb, &fault->nrpfb);
	return 0;
}

static const struct nvkm_fault_func
gv100_fault = {
	.oneinit = gv100_fault_oneinit,
	.init = gv100_fault_init,
	.fini = gv100_fault_fini,
	.intr = gv100_fault_intr,
	.buffer.nr = 2,
	.buffer.entry_size = 32,
	.buffer.info = gv100_fault_buffer_info,
	.buffer.pin = gp100_fault_buffer_pin,
	.buffer.init = gv100_fault_buffer_init,
	.buffer.fini = gv100_fault_buffer_fini,
	.buffer.intr = gv100_fault_buffer_intr,
	/*TODO: Figure out how to expose non-replayable fault buffer, which,
	 *      for some reason, is where recoverable CE faults appear...
	 *
	 *      It's a bit tricky, as both NVKM and SVM will need access to
	 *      the non-replayable fault buffer.
	 */
	/* Expose fault buffer 1 (the replayable buffer) to userspace as
	 * VOLTA_FAULT_BUFFER_A.
	 */
	.user = { { 0, 0, VOLTA_FAULT_BUFFER_A }, 1 },
};

int
gv100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		struct nvkm_fault **pfault)
{
	int ret = nvkm_fault_new_(&gv100_fault, device, type, inst, pfault);
	if (ret)
		return ret;

	INIT_WORK(&(*pfault)->nrpfb_work, gv100_fault_buffer_process);
	return 0;
}