/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"

static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	man->priv = fb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

/* Unmap and release any GPU virtual address space still held by a node. */
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	nvkm_mem_node_cleanup(mem->mm_node);
	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (drm->device.info.ram_size == 0)
		return -ENOMEM;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
			     mem->page_alignment << PAGE_SHIFT, size_nc,
			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		/* Returning 0 with a NULL node lets TTM try another
		 * placement instead of failing the allocation outright. */
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start = node->offset >> PAGE_SHIFT;
	return 0;
}
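/*
 * VRAM is carved up by the nvkm_ram allocator directly rather than by a
 * generic drm_mm range manager, so all hooks defer to nvkm.
 */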
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_vram_manager_init,
	.takedown = nouveau_vram_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_vram_manager_del,
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nvkm_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	/* Derive the backing storage type from the BO's tile flags; the
	 * width of the field differs between GPU generations. */
	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		break;
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
			drm->device.info.family);
		break;
	}

	mem->mm_node = node;
	mem->start = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_gart_manager_init,
	.takedown = nouveau_gart_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_gart_manager_del,
	.debug = nouveau_gart_manager_debug,
};

/*XXX*/
#include <subdev/mmu/nv04.h>

static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;
	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nv04_gart_manager_init,
	.takedown = nv04_gart_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nv04_gart_manager_del,
	.debug = nv04_gart_manager_debug,
};
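/*
 * mmap entry point for the whole device node: offsets below
 * DRM_FILE_PAGE_OFFSET are legacy DRM maps, everything above is a TTM BO.
 */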
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct nvkm_pci *pci = device->pci;
	struct drm_device *dev = drm->dev;
	u32 bits;
	int ret;

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	bits = nvxx_mmu(&drm->device)->dma_bits;
	if (nvxx_device(&drm->device)->func->pci) {
		if (drm->agp.bridge ||
		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
			bits = 32;

		ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
		if (ret)
			return ret;

		/* Fall back to a 32-bit coherent mask if the wider one
		 * is not supported. */
		ret = pci_set_consistent_dma_mask(dev->pdev,
						  DMA_BIT_MASK(bits));
		if (ret)
			pci_set_consistent_dma_mask(dev->pdev,
						    DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 bits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

	/* GART init */
	if (!drm->agp.bridge) {
		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}
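/* Undo nouveau_ttm_init(): drain both memory domains, then drop the TTM
 * device, the global references, and the write-combine mapping. */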
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
}