/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "ram.h"

#include <core/memory.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
#include <engine/gr.h>
#include <engine/mpeg.h>

void
nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
	fb->func->tile.fini(fb, region, tile);
}

void
nvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size,
		  u32 pitch, u32 flags, struct nvkm_fb_tile *tile)
{
	fb->func->tile.init(fb, region, addr, size, pitch, flags, tile);
}

void
nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
	struct nvkm_device *device = fb->subdev.device;
	if (fb->func->tile.prog) {
		fb->func->tile.prog(fb, region, tile);
		if (device->gr)
			nvkm_engine_tile(&device->gr->engine, region);
		if (device->mpeg)
			nvkm_engine_tile(device->mpeg, region);
	}
}

static void
nvkm_fb_sysmem_flush_page_init(struct nvkm_device *device)
{
	struct nvkm_fb *fb = device->fb;

	if (fb->func->sysmem.flush_page_init)
		fb->func->sysmem.flush_page_init(fb);
}

int
nvkm_fb_bios_memtype(struct nvkm_bios *bios)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	struct nvkm_device *device = subdev->device;
	const u8 ramcfg = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
	struct nvbios_M0203E M0203E;
	u8 ver, hdr;

	if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
		switch (M0203E.type) {
		case M0203E_TYPE_DDR2  : return NVKM_RAM_TYPE_DDR2;
		case M0203E_TYPE_DDR3  : return NVKM_RAM_TYPE_DDR3;
		case M0203E_TYPE_GDDR3 : return NVKM_RAM_TYPE_GDDR3;
		case M0203E_TYPE_GDDR5 : return NVKM_RAM_TYPE_GDDR5;
		case M0203E_TYPE_GDDR5X: return NVKM_RAM_TYPE_GDDR5X;
		case M0203E_TYPE_GDDR6 : return NVKM_RAM_TYPE_GDDR6;
		case M0203E_TYPE_HBM2  : return NVKM_RAM_TYPE_HBM2;
		default:
			nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
			return NVKM_RAM_TYPE_UNKNOWN;
		}
	}

	nvkm_warn(subdev, "M0203E not matched!\n");
	return NVKM_RAM_TYPE_UNKNOWN;
}

static void
nvkm_fb_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	if (fb->func->intr)
		fb->func->intr(fb);
}

static int
nvkm_fb_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	u32 tags = 0;

	if (fb->func->ram_new) {
		int ret = fb->func->ram_new(fb, &fb->ram);
		if (ret) {
			nvkm_error(subdev, "vram setup failed, %d\n", ret);
			return ret;
		}
	}

	if (fb->func->oneinit) {
		int ret = fb->func->oneinit(fb);
		if (ret)
			return ret;
	}

	/* Initialise compression tag allocator.
	 *
	 * LTC oneinit() will override this on Fermi and newer.
	 */
	if (fb->func->tags) {
		tags = fb->func->tags(fb);
		nvkm_debug(subdev, "%d comptags\n", tags);
	}

	return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1);
}
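
/* Check whether the VRAM protected region (VPR) is locked, and if so run
 * the scrubber binary to unlock it.  Returns 0 when there is nothing to
 * do (no vpr.scrub_required() hook, region already unlocked, or no
 * scrubber binary loaded), and -EIO if the region is still locked after
 * a scrub.
 */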
int
nvkm_fb_mem_unlock(struct nvkm_fb *fb)
{
	struct nvkm_subdev *subdev = &fb->subdev;
	int ret;

	if (!fb->func->vpr.scrub_required)
		return 0;

	if (!fb->func->vpr.scrub_required(fb)) {
		nvkm_debug(subdev, "VPR not locked\n");
		return 0;
	}

	nvkm_debug(subdev, "VPR locked, running scrubber binary\n");

	if (!fb->vpr_scrubber.size) {
		nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
		return 0;
	}

	ret = fb->func->vpr.scrub(fb);
	if (ret) {
		nvkm_error(subdev, "VPR scrubber binary failed\n");
		return ret;
	}

	if (fb->func->vpr.scrub_required(fb)) {
		nvkm_error(subdev, "VPR still locked after scrub!\n");
		return -EIO;
	}

	nvkm_debug(subdev, "VPR scrubber binary successful\n");
	return 0;
}

static int
nvkm_fb_init(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	int ret, i;

	if (fb->ram) {
		ret = nvkm_ram_init(fb->ram);
		if (ret)
			return ret;
	}

	for (i = 0; i < fb->tile.regions; i++)
		fb->func->tile.prog(fb, i, &fb->tile.region[i]);

	nvkm_fb_sysmem_flush_page_init(subdev->device);

	if (fb->func->init)
		fb->func->init(fb);

	if (fb->func->init_remapper)
		fb->func->init_remapper(fb);

	if (fb->func->init_page) {
		ret = fb->func->init_page(fb);
		if (WARN_ON(ret))
			return ret;
	}

	if (fb->func->init_unkn)
		fb->func->init_unkn(fb);

	return 0;
}

static int
nvkm_fb_preinit(struct nvkm_subdev *subdev)
{
	nvkm_fb_sysmem_flush_page_init(subdev->device);
	return 0;
}

static void *
nvkm_fb_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_fb *fb = nvkm_fb(subdev);
	int i;

	nvkm_memory_unref(&fb->mmu_wr);
	nvkm_memory_unref(&fb->mmu_rd);

	for (i = 0; i < fb->tile.regions; i++)
		fb->func->tile.fini(fb, i, &fb->tile.region[i]);

	nvkm_mm_fini(&fb->tags.mm);
	mutex_destroy(&fb->tags.mutex);

	nvkm_ram_del(&fb->ram);

	nvkm_blob_dtor(&fb->vpr_scrubber);

	if (fb->sysmem.flush_page) {
		dma_unmap_page(subdev->device->dev, fb->sysmem.flush_page_addr,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(fb->sysmem.flush_page);
	}

	if (fb->func->dtor)
		return fb->func->dtor(fb);

	return fb;
}

static const struct nvkm_subdev_func
nvkm_fb = {
	.dtor = nvkm_fb_dtor,
	.preinit = nvkm_fb_preinit,
	.oneinit = nvkm_fb_oneinit,
	.init = nvkm_fb_init,
	.intr = nvkm_fb_intr,
};
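
/* Common constructor: hooks up the chip-specific function table, selects
 * the big-page size (overridable with the "NvFbBigPage" config option),
 * and, on chips that require it, allocates and DMA-maps the scratch page
 * used to flush writes to system memory.
 */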
"NvFbBigPage", fb->func->default_bigpage); 263 mutex_init(&fb->tags.mutex); 264 265 if (func->sysmem.flush_page_init) { 266 fb->sysmem.flush_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 267 if (!fb->sysmem.flush_page) 268 return -ENOMEM; 269 270 fb->sysmem.flush_page_addr = dma_map_page(device->dev, fb->sysmem.flush_page, 271 0, PAGE_SIZE, DMA_BIDIRECTIONAL); 272 if (dma_mapping_error(device->dev, fb->sysmem.flush_page_addr)) 273 return -EFAULT; 274 } 275 276 return 0; 277 } 278 279 int 280 nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, 281 enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) 282 { 283 if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL))) 284 return -ENOMEM; 285 return nvkm_fb_ctor(func, device, type, inst, *pfb); 286 } 287