/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "ram.h"

#include <core/memory.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
#include <engine/gr.h>
#include <engine/mpeg.h>

void
nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
	fb->func->tile.fini(fb, region, tile);
}

void
nvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size,
		  u32 pitch, u32 flags, struct nvkm_fb_tile *tile)
{
	fb->func->tile.init(fb, region, addr, size, pitch, flags, tile);
}

void
nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
	struct nvkm_device *device = fb->subdev.device;
	if (fb->func->tile.prog) {
		fb->func->tile.prog(fb, region, tile);
		if (device->gr)
			nvkm_engine_tile(&device->gr->engine, region);
		if (device->mpeg)
			nvkm_engine_tile(device->mpeg, region);
	}
}

static void
nvkm_fb_sysmem_flush_page_init(struct nvkm_device *device)
{
	struct nvkm_fb *fb = device->fb;

	if (fb->func->sysmem.flush_page_init)
		fb->func->sysmem.flush_page_init(fb);
}

int
nvkm_fb_bios_memtype(struct nvkm_bios *bios)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	struct nvkm_device *device = subdev->device;
	const u8 ramcfg = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
	struct nvbios_M0203E M0203E;
	u8 ver, hdr;

	if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
		switch (M0203E.type) {
		case M0203E_TYPE_DDR2  : return NVKM_RAM_TYPE_DDR2;
		case M0203E_TYPE_DDR3  : return NVKM_RAM_TYPE_DDR3;
		case M0203E_TYPE_GDDR3 : return NVKM_RAM_TYPE_GDDR3;
		case M0203E_TYPE_GDDR5 : return NVKM_RAM_TYPE_GDDR5;
		case M0203E_TYPE_GDDR5X: return NVKM_RAM_TYPE_GDDR5X;
		case M0203E_TYPE_GDDR6 : return NVKM_RAM_TYPE_GDDR6;
		case M0203E_TYPE_HBM2  : return NVKM_RAM_TYPE_HBM2;
		default:
			nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
			return NVKM_RAM_TYPE_UNKNOWN;
		}
	}

	nvkm_warn(subdev, "M0203E not matched!\n");
	return NVKM_RAM_TYPE_UNKNOWN;
}
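
/* Worked example of the RAMCFG strap decode above (illustrative note, not
 * part of the original source): bits 5:2 of register 0x101000 select which
 * RAMCFG entry indexes the VBIOS M0203 table.  A read returning 0x0000002c
 * decodes as (0x2c & 0x3c) >> 2 == 0xb, so M0203 entry 11 determines the
 * reported memory type.
 */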
static void
nvkm_fb_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	if (fb->func->intr)
		fb->func->intr(fb);
}

static int
nvkm_fb_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	u32 tags = 0;

	if (fb->func->ram_new) {
		int ret = fb->func->ram_new(fb, &fb->ram);
		if (ret) {
			nvkm_error(subdev, "vram setup failed, %d\n", ret);
			return ret;
		}
	}

	if (fb->func->oneinit) {
		int ret = fb->func->oneinit(fb);
		if (ret)
			return ret;
	}

	/* Initialise compression tag allocator.
	 *
	 * LTC oneinit() will override this on Fermi and newer.
	 */
	if (fb->func->tags) {
		tags = fb->func->tags(fb);
		nvkm_debug(subdev, "%d comptags\n", tags);
	}

	return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1);
}

int
nvkm_fb_mem_unlock(struct nvkm_fb *fb)
{
	struct nvkm_subdev *subdev = &fb->subdev;
	int ret;

	if (!fb->func->vpr.scrub_required)
		return 0;

	ret = nvkm_subdev_oneinit(subdev);
	if (ret)
		return ret;

	if (!fb->func->vpr.scrub_required(fb)) {
		nvkm_debug(subdev, "VPR not locked\n");
		return 0;
	}

	nvkm_debug(subdev, "VPR locked, running scrubber binary\n");

	if (!fb->vpr_scrubber.fw.img) {
		nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
		return 0;
	}

	ret = fb->func->vpr.scrub(fb);
	if (ret) {
		nvkm_error(subdev, "VPR scrubber binary failed\n");
		return ret;
	}

	if (fb->func->vpr.scrub_required(fb)) {
		nvkm_error(subdev, "VPR still locked after scrub!\n");
		return -EIO;
	}

	nvkm_debug(subdev, "VPR scrubber binary successful\n");
	return 0;
}

u64
nvkm_fb_vidmem_size(struct nvkm_device *device)
{
	struct nvkm_fb *fb = device->fb;

	if (fb && fb->func->vidmem.size)
		return fb->func->vidmem.size(fb);

	WARN_ON(1);
	return 0;
}

static int
nvkm_fb_init(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	int ret, i;

	if (fb->ram) {
		ret = nvkm_ram_init(fb->ram);
		if (ret)
			return ret;
	}

	for (i = 0; i < fb->tile.regions; i++)
		fb->func->tile.prog(fb, i, &fb->tile.region[i]);

	nvkm_fb_sysmem_flush_page_init(subdev->device);

	if (fb->func->init)
		fb->func->init(fb);

	if (fb->func->init_remapper)
		fb->func->init_remapper(fb);

	if (fb->func->init_page) {
		ret = fb->func->init_page(fb);
		if (WARN_ON(ret))
			return ret;
	}

	if (fb->func->init_unkn)
		fb->func->init_unkn(fb);

	return 0;
}

static int
nvkm_fb_preinit(struct nvkm_subdev *subdev)
{
	nvkm_fb_sysmem_flush_page_init(subdev->device);
	return 0;
}

static void *
nvkm_fb_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	int i;

	nvkm_memory_unref(&fb->mmu_wr);
	nvkm_memory_unref(&fb->mmu_rd);

	for (i = 0; i < fb->tile.regions; i++)
		fb->func->tile.fini(fb, i, &fb->tile.region[i]);

	nvkm_mm_fini(&fb->tags.mm);
	mutex_destroy(&fb->tags.mutex);

	nvkm_ram_del(&fb->ram);

	nvkm_falcon_fw_dtor(&fb->vpr_scrubber);

	if (fb->sysmem.flush_page) {
		dma_unmap_page(subdev->device->dev, fb->sysmem.flush_page_addr,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(fb->sysmem.flush_page);
	}

	if (fb->func->dtor)
		return fb->func->dtor(fb);

	return fb;
}

static const struct nvkm_subdev_func
nvkm_fb = {
	.dtor = nvkm_fb_dtor,
	.preinit = nvkm_fb_preinit,
	.oneinit = nvkm_fb_oneinit,
	.init = nvkm_fb_init,
	.intr = nvkm_fb_intr,
};
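
/* Illustrative sketch (not part of the original source): this file only
 * calls into the hooks a chipset chooses to provide, so a minimal
 * chip-specific func table can be very small.  The names example_fb,
 * example_fb_init and example_fb_ram_new are hypothetical placeholders.
 *
 *	static const struct nvkm_fb_func
 *	example_fb = {
 *		.init = example_fb_init,
 *		.ram_new = example_fb_ram_new,
 *		.default_bigpage = 16,
 *	};
 *
 * nvkm_fb_ctor() below stores a pointer to such a table in fb->func and
 * derives fb->tile.regions and fb->page from it.
 */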
int
nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
	     enum nvkm_subdev_type type, int inst, struct nvkm_fb *fb)
{
	nvkm_subdev_ctor(&nvkm_fb, device, type, inst, &fb->subdev);
	fb->func = func;
	fb->tile.regions = fb->func->tile.regions;
	fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", fb->func->default_bigpage);
	mutex_init(&fb->tags.mutex);

	if (func->sysmem.flush_page_init) {
		fb->sysmem.flush_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!fb->sysmem.flush_page)
			return -ENOMEM;

		fb->sysmem.flush_page_addr = dma_map_page(device->dev, fb->sysmem.flush_page,
							  0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(device->dev, fb->sysmem.flush_page_addr))
			return -EFAULT;
	}

	return 0;
}

int
nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
	     enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
{
	if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_fb_ctor(func, device, type, inst, *pfb);
}
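
/* Illustrative usage sketch (not part of the original source): a chipset
 * driver typically wraps nvkm_fb_new_() in its own constructor, passing its
 * func table.  The names example_fb_new and example_fb are hypothetical
 * placeholders continuing the sketch above.
 *
 *	int
 *	example_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type,
 *		       int inst, struct nvkm_fb **pfb)
 *	{
 *		return nvkm_fb_new_(&example_fb, device, type, inst, pfb);
 *	}
 */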