/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"

static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	/* Stash the fb subdev for use by the debug callback. */
	man->priv = pfb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

/* Unmap and release both of a memory node's virtual address ranges. */
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	nvkm_mem_node_cleanup(mem->mm_node);
	pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
			    mem->page_alignment << PAGE_SHIFT, size_nc,
			    (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		/* TTM treats a zero return with no mm_node as "no space". */
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start = node->offset >> PAGE_SHIFT;
	return 0;
}
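
/*
 * Walk the VRAM mm's node list under the subdev mutex and dump each
 * node's type and byte range; nvkm_mm offsets and lengths are in 4 KiB
 * units, hence the << 12 shifts below.
 */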
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nvkm_fb *pfb = man->priv;
	struct nvkm_mm *mm = &pfb->vram;
	struct nvkm_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&nv_subdev(pfb)->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&nv_subdev(pfb)->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_vram_manager_init,
	.takedown = nouveau_vram_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_vram_manager_del,
	.debug = nouveau_vram_manager_debug,
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nvkm_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	/* Derive the backing memory type from the bo's tile flags. */
	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		break;
	}

	mem->mm_node = node;
	mem->start = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_gart_manager_init,
	.takedown = nouveau_gart_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_gart_manager_del,
	.debug = nouveau_gart_manager_debug,
};

/*XXX*/
#include <subdev/mmu/nv04.h>

static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu_priv *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;
	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}
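
/*
 * Drop the VM address space reserved for an nv04 GART allocation (if
 * any was made) and free the node itself.
 */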
static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	/* Reserve GPU address space; the pages are mapped at bind time. */
	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nv04_gart_manager_init,
	.takedown = nv04_gart_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nv04_gart_manager_del,
	.debug = nv04_gart_manager_debug,
};

int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}
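
/*
 * Bring up TTM for the device: pick a DMA mask (forced to 32 bits when
 * AGP is active or the PCI device can't address more), initialise the
 * TTM globals and bo device, then create the VRAM and GART regions.
 */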
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	u32 bits;
	int ret;

	bits = nvxx_mmu(&drm->device)->dma_bits;
	if (nv_device_is_pci(nvxx_device(&drm->device))) {
		if (drm->agp.stat == ENABLED ||
		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
			bits = 32;

		ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
		if (ret)
			return ret;

		ret = pci_set_consistent_dma_mask(dev->pdev,
						  DMA_BIT_MASK(bits));
		if (ret)
			pci_set_consistent_dma_mask(dev->pdev,
						    DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 bits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
					 nv_device_resource_len(nvxx_device(&drm->device), 1));

	/* GART init */
	if (drm->agp.stat != ENABLED) {
		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	mutex_lock(&drm->dev->struct_mutex);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&drm->dev->struct_mutex);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
}