/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

struct nv50_bar {
	struct nvkm_bar base;
	spinlock_t lock;
	struct nvkm_gpuobj *mem;
	struct nvkm_gpuobj *pad;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *bar1_vm;
	struct nvkm_gpuobj *bar1;
	struct nvkm_vm *bar3_vm;
	struct nvkm_gpuobj *bar3;
};

/* map an allocation into the kernel-managed BAR3 aperture */
static int
nv50_bar_kmap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
	      struct nvkm_vma *vma)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	int ret;

	ret = nvkm_vm_get(bar->bar3_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, mem);
	return 0;
}

/* map an allocation into the user-visible BAR1 aperture */
static int
nv50_bar_umap(struct nvkm_bar *obj, struct nvkm_mem *mem, u32 flags,
	      struct nvkm_vma *vma)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	int ret;

	ret = nvkm_vm_get(bar->bar1_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, mem);
	return 0;
}

static void
nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
{
	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
}

/* trigger a write flush and wait for the busy bit to clear */
static void
nv50_bar_flush(struct nvkm_bar *obj)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	unsigned long flags;
	spin_lock_irqsave(&bar->lock, flags);
	nv_wr32(bar, 0x00330c, 0x00000001);
	if (!nv_wait(bar, 0x00330c, 0x00000002, 0x00000000))
		nv_warn(bar, "flush timeout\n");
	spin_unlock_irqrestore(&bar->lock, flags);
}

/* G84 and newer use a different flush register */
void
g84_bar_flush(struct nvkm_bar *obj)
{
	struct nv50_bar *bar = container_of(obj, typeof(*bar), base);
	unsigned long flags;
	spin_lock_irqsave(&bar->lock, flags);
	nv_wr32(bar, 0x070000, 0x00000001);
	if (!nv_wait(bar, 0x070000, 0x00000002, 0x00000000))
		nv_warn(bar, "flush timeout\n");
	spin_unlock_irqrestore(&bar->lock, flags);
}

static int
nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_device(parent);
	struct nvkm_object *heap;
	struct nvkm_vm *vm;
	struct nv50_bar *bar;
	u64 start, limit;
	int ret;

	ret = nvkm_bar_create(parent, engine, oclass, &bar);
	*pobject = nv_object(bar);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), NULL, 0x20000, 0,
			      NVOBJ_FLAG_HEAP, &bar->mem);
	heap = nv_object(bar->mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap,
			      (device->chipset == 0x50) ? 0x1400 : 0x0200,
			      0, 0, &bar->pad);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 0x4000, 0, 0, &bar->pgd);
	if (ret)
		return ret;

	/* BAR3 */
	start = 0x0100000000ULL;
	limit = start + nv_device_resource_len(device, 3);

	ret = nvkm_vm_new(device, start, limit, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nvkm_gpuobj_new(nv_object(bar), heap,
			      ((limit-- - start) >> 12) * 8, 0x1000,
			      NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar3);
	if (ret)
		return ret;

	/* DMA object spanning the BAR3 aperture */
	nv_wo32(bar->bar3, 0x00, 0x7fc00000);
	nv_wo32(bar->bar3, 0x04, lower_32_bits(limit));
	nv_wo32(bar->bar3, 0x08, lower_32_bits(start));
	nv_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
				 upper_32_bits(start));
	nv_wo32(bar->bar3, 0x10, 0x00000000);
	nv_wo32(bar->bar3, 0x14, 0x00000000);

	/* BAR1 */
	start = 0x0000000000ULL;
	limit = start + nv_device_resource_len(device, 1);

	ret = nvkm_vm_new(device, start, limit--, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar1);
	if (ret)
		return ret;

	/* DMA object spanning the BAR1 aperture */
	nv_wo32(bar->bar1, 0x00, 0x7fc00000);
	nv_wo32(bar->bar1, 0x04, lower_32_bits(limit));
	nv_wo32(bar->bar1, 0x08, lower_32_bits(start));
	nv_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
				 upper_32_bits(start));
	nv_wo32(bar->bar1, 0x10, 0x00000000);
	nv_wo32(bar->bar1, 0x14, 0x00000000);

	bar->base.alloc = nvkm_bar_alloc;
	bar->base.kmap = nv50_bar_kmap;
	bar->base.umap = nv50_bar_umap;
	bar->base.unmap = nv50_bar_unmap;
	if (device->chipset == 0x50)
		bar->base.flush = nv50_bar_flush;
	else
		bar->base.flush = g84_bar_flush;
	spin_lock_init(&bar->lock);
	return 0;
}

static void
nv50_bar_dtor(struct nvkm_object *object)
{
	struct nv50_bar *bar = (void *)object;
	nvkm_gpuobj_ref(NULL, &bar->bar1);
	nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
	nvkm_gpuobj_ref(NULL, &bar->bar3);
	if (bar->bar3_vm) {
		nvkm_gpuobj_ref(NULL, &bar->bar3_vm->pgt[0].obj[0]);
		nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
	}
	nvkm_gpuobj_ref(NULL, &bar->pgd);
	nvkm_gpuobj_ref(NULL, &bar->pad);
	nvkm_gpuobj_ref(NULL, &bar->mem);
	nvkm_bar_destroy(&bar->base);
}

static int
nv50_bar_init(struct nvkm_object *object)
{
	struct nv50_bar *bar = (void *)object;
	int ret, i;

	ret = nvkm_bar_init(&bar->base);
	if (ret)
		return ret;

	/* reset the unit via PMC_ENABLE, then flush the VM */
	nv_mask(bar, 0x000200, 0x00000100, 0x00000000);
	nv_mask(bar, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(bar, 0x100c80, 0x00060001);
	if (!nv_wait(bar, 0x100c80, 0x00000001, 0x00000000)) {
		nv_error(bar, "vm flush timeout\n");
		return -EBUSY;
	}

	/* bind the instance block, and the BAR1/BAR3 DMA objects */
	nv_wr32(bar, 0x001704, 0x00000000 | bar->mem->addr >> 12);
	nv_wr32(bar, 0x001704, 0x40000000 | bar->mem->addr >> 12);
	nv_wr32(bar, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
	nv_wr32(bar, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
	for (i = 0; i < 8; i++)
		nv_wr32(bar, 0x001900 + (i * 4), 0x00000000);
	return 0;
}

static int
nv50_bar_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_bar *bar = (void *)object;
	return nvkm_bar_fini(&bar->base, suspend);
}

struct nvkm_oclass
nv50_bar_oclass = {
	.handle = NV_SUBDEV(BAR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_bar_ctor,
		.dtor = nv50_bar_dtor,
		.init = nv50_bar_init,
		.fini = nv50_bar_fini,
	},
};