/*
 * Copyright 2016 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "chan.h"
#include "runl.h"
#include "gk104.h"
#include "changk104.h"

#include <core/gpuobj.h>
#include <subdev/fault.h>

#include <nvif/class.h>

const struct nvkm_chan_func
gm107_chan = {
};

static void
gm107_fifo_runlist_chan(struct gk104_fifo_chan *chan,
			struct nvkm_memory *memory, u32 offset)
{
	nvkm_wo32(memory, offset + 0, chan->base.chid);
	nvkm_wo32(memory, offset + 4, chan->base.inst->addr >> 12);
}

const struct gk104_fifo_runlist_func
gm107_fifo_runlist = {
	.size = 8,
	.cgrp = gk110_fifo_runlist_cgrp,
	.chan = gm107_fifo_runlist_chan,
	.commit = gk104_fifo_runlist_commit,
};

const struct nvkm_runl_func
gm107_runl = {
};

const struct nvkm_enum
gm107_fifo_fault_engine[] = {
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
	{ 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
	{ 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
	{ 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
	{ 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
	{ 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
	{ 0x0f, "HOSTSR" },
	{ 0x13, "PERF" },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{}
};

const struct nvkm_fifo_func_mmu_fault
gm107_fifo_mmu_fault = {
	.recover = gk104_fifo_fault,
};

void
gm107_fifo_intr_mmu_fault_unit(struct nvkm_fifo *fifo, int unit)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	struct nvkm_fault_data info;

	info.inst   = (u64)inst << 12;
	info.addr   = ((u64)vahi << 32) | valo;
	info.time   = 0;
	info.engine = unit;
	info.valid  = 1;
	info.gpc    = (type & 0x1f000000) >> 24;
	info.client = (type & 0x00003f00) >> 8;
	info.access = (type & 0x00000080) >> 7;
	info.hub    = (type & 0x00000040) >> 6;
	info.reason = (type & 0x0000000f);

	nvkm_fifo_fault(fifo, &info);
}

static int
gm107_fifo_chid_nr(struct nvkm_fifo *fifo)
{
	return 2048;
}

static const struct nvkm_fifo_func
gm107_fifo = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.chid_nr = gm107_fifo_chid_nr,
	.chid_ctor = gk110_fifo_chid_ctor,
	.runq_nr = gf100_fifo_runq_nr,
	.runl_ctor = gk104_fifo_runl_ctor,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.intr_mmu_fault_unit = gm107_fifo_intr_mmu_fault_unit,
	.mmu_fault = &gm107_fifo_mmu_fault,
	.fault.access = gk104_fifo_fault_access,
	.fault.engine = gm107_fifo_fault_engine,
	.fault.reason = gk104_fifo_fault_reason,
	.fault.hubclient = gk104_fifo_fault_hubclient,
	.fault.gpcclient = gk104_fifo_fault_gpcclient,
	.engine_id = gk104_fifo_engine_id,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.recover_chan = gk104_fifo_recover_chan,
	.runlist = &gm107_fifo_runlist,
	.pbdma = &gk208_fifo_pbdma,
	.runl = &gm107_runl,
	.runq = &gk208_runq,
	.engn = &gk104_engn,
	.engn_ce = &gk104_engn_ce,
	.cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp },
	.chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_B }, &gm107_chan, .ctor = &gk104_fifo_gpfifo_new },
};

int
gm107_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gm107_fifo, device, type, inst, 0, pfifo);
}