Lines Matching +full:mem +full:- +full:base
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
39 ttm_bo_mem_put(bo, &bo->mem); in ttm_bo_free_old_node()
46 struct ttm_tt *ttm = bo->ttm; in ttm_bo_move_ttm()
47 struct ttm_mem_reg *old_mem = &bo->mem; in ttm_bo_move_ttm()
50 if (old_mem->mem_type != TTM_PL_SYSTEM) { in ttm_bo_move_ttm()
53 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, in ttm_bo_move_ttm()
55 old_mem->mem_type = TTM_PL_SYSTEM; in ttm_bo_move_ttm()
58 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement); in ttm_bo_move_ttm()
62 if (new_mem->mem_type != TTM_PL_SYSTEM) { in ttm_bo_move_ttm()
69 new_mem->mm_node = NULL; in ttm_bo_move_ttm()
76 if (likely(man->io_reserve_fastpath)) in ttm_mem_io_lock()
80 if (sx_xlock_sig(&man->io_reserve_mutex)) in ttm_mem_io_lock()
81 return (-EINTR); in ttm_mem_io_lock()
86 sx_xlock(&man->io_reserve_mutex); in ttm_mem_io_lock()
92 if (likely(man->io_reserve_fastpath)) in ttm_mem_io_unlock()
95 sx_xunlock(&man->io_reserve_mutex); in ttm_mem_io_unlock()
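The ttm_mem_io_lock()/ttm_mem_io_unlock() lines above show a fastpath: when the memory-type manager has io_reserve_fastpath set there is no shared LRU state to protect and the lock is skipped entirely, otherwise a sleepable sx lock is taken, either interruptibly via sx_xlock_sig() (which can fail with -EINTR) or unconditionally. A minimal sketch of that pattern, assuming a hypothetical simplified manager struct and a pthread mutex standing in for the kernel sx lock; the interruptible variant is omitted:

    #include <pthread.h>
    #include <stdbool.h>

    /* Hypothetical, simplified stand-in for struct ttm_mem_type_manager. */
    struct mem_type_manager {
        bool            io_reserve_fastpath;   /* no shared LRU state to protect */
        pthread_mutex_t io_reserve_mutex;
    };

    /* Skip the lock entirely on the fastpath, otherwise serialize reservers. */
    static int
    mem_io_lock(struct mem_type_manager *man)
    {
        if (man->io_reserve_fastpath)
            return (0);
        pthread_mutex_lock(&man->io_reserve_mutex);
        return (0);
    }

    static void
    mem_io_unlock(struct mem_type_manager *man)
    {
        if (man->io_reserve_fastpath)
            return;
        pthread_mutex_unlock(&man->io_reserve_mutex);
    }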
102 if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru)) in ttm_mem_io_evict()
103 return -EAGAIN; in ttm_mem_io_evict()
105 bo = list_first_entry(&man->io_reserve_lru, in ttm_mem_io_evict()
108 list_del_init(&bo->io_reserve_lru); in ttm_mem_io_evict()
115 struct ttm_mem_reg *mem) in ttm_mem_io_reserve() argument
117 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; in ttm_mem_io_reserve()
120 if (!bdev->driver->io_mem_reserve) in ttm_mem_io_reserve()
122 if (likely(man->io_reserve_fastpath)) in ttm_mem_io_reserve()
123 return bdev->driver->io_mem_reserve(bdev, mem); in ttm_mem_io_reserve()
125 if (bdev->driver->io_mem_reserve && in ttm_mem_io_reserve()
126 mem->bus.io_reserved_count++ == 0) { in ttm_mem_io_reserve()
128 ret = bdev->driver->io_mem_reserve(bdev, mem); in ttm_mem_io_reserve()
129 if (ret == -EAGAIN) { in ttm_mem_io_reserve()
139 struct ttm_mem_reg *mem) in ttm_mem_io_free() argument
141 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; in ttm_mem_io_free()
143 if (likely(man->io_reserve_fastpath)) in ttm_mem_io_free()
146 if (bdev->driver->io_mem_reserve && in ttm_mem_io_free()
147 --mem->bus.io_reserved_count == 0 && in ttm_mem_io_free()
148 bdev->driver->io_mem_free) in ttm_mem_io_free()
149 bdev->driver->io_mem_free(bdev, mem); in ttm_mem_io_free()
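ttm_mem_io_reserve() and ttm_mem_io_free() keep a per-region use count, bus.io_reserved_count: the driver's io_mem_reserve hook runs only on the 0 -> 1 transition and io_mem_free only on the 1 -> 0 transition, and an -EAGAIN result lets the caller evict another reservation through ttm_mem_io_evict() and retry. A minimal userspace sketch of that counting pattern, using hypothetical simplified stand-ins for the TTM structures and driver hooks:

    #include <stddef.h>

    /* Hypothetical, simplified stand-ins for the TTM structures and driver hooks. */
    struct mem_reg {
        int io_reserved_count;
    };

    struct driver_ops {
        int  (*io_mem_reserve)(struct mem_reg *);
        void (*io_mem_free)(struct mem_reg *);
    };

    /* Call the driver's reserve hook only when the first user shows up. */
    static int
    mem_io_reserve(const struct driver_ops *drv, struct mem_reg *mem)
    {
        int ret;

        if (drv->io_mem_reserve == NULL)
            return (0);
        if (mem->io_reserved_count++ == 0) {
            ret = drv->io_mem_reserve(mem);
            if (ret != 0) {
                mem->io_reserved_count--;
                return (ret);   /* e.g. -EAGAIN: caller may evict an LRU entry and retry */
            }
        }
        return (0);
    }

    /* Call the driver's free hook only when the last user goes away. */
    static void
    mem_io_free(const struct driver_ops *drv, struct mem_reg *mem)
    {
        if (drv->io_mem_reserve != NULL &&
            --mem->io_reserved_count == 0 &&
            drv->io_mem_free != NULL)
            drv->io_mem_free(mem);
    }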
155 struct ttm_mem_reg *mem = &bo->mem; in ttm_mem_io_reserve_vm() local
158 if (!mem->bus.io_reserved_vm) { in ttm_mem_io_reserve_vm()
160 &bo->bdev->man[mem->mem_type]; in ttm_mem_io_reserve_vm()
162 ret = ttm_mem_io_reserve(bo->bdev, mem); in ttm_mem_io_reserve_vm()
165 mem->bus.io_reserved_vm = true; in ttm_mem_io_reserve_vm()
166 if (man->use_io_reserve_lru) in ttm_mem_io_reserve_vm()
167 list_add_tail(&bo->io_reserve_lru, in ttm_mem_io_reserve_vm()
168 &man->io_reserve_lru); in ttm_mem_io_reserve_vm()
175 struct ttm_mem_reg *mem = &bo->mem; in ttm_mem_io_free_vm() local
177 if (mem->bus.io_reserved_vm) { in ttm_mem_io_free_vm()
178 mem->bus.io_reserved_vm = false; in ttm_mem_io_free_vm()
179 list_del_init(&bo->io_reserve_lru); in ttm_mem_io_free_vm()
180 ttm_mem_io_free(bo->bdev, mem); in ttm_mem_io_free_vm()
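ttm_mem_io_reserve_vm()/ttm_mem_io_free_vm() add one more piece: a per-object io_reserved_vm flag so a buffer is reserved at most once for CPU faults, plus a spot on the manager's io_reserve_lru so that reservation can be evicted later. A sketch of that bookkeeping, assuming hypothetical simplified types and a <sys/queue.h> tail queue in place of the Linux-style list:

    #include <stdbool.h>
    #include <sys/queue.h>

    /* Hypothetical, simplified stand-ins for the TTM buffer object and manager. */
    struct buffer_object {
        bool reserved_vm;                   /* already reserved for CPU faults */
        TAILQ_ENTRY(buffer_object) lru;     /* position on the manager's LRU   */
    };

    struct mem_type_manager {
        bool use_io_reserve_lru;
        TAILQ_HEAD(, buffer_object) io_reserve_lru;
    };

    /* Reserve at most once per object and remember it on the LRU for eviction. */
    static void
    mem_io_reserve_vm(struct mem_type_manager *man, struct buffer_object *bo)
    {
        if (bo->reserved_vm)
            return;
        /* ... driver io_mem_reserve hook would run here ... */
        bo->reserved_vm = true;
        if (man->use_io_reserve_lru)
            TAILQ_INSERT_TAIL(&man->io_reserve_lru, bo, lru);
    }

    static void
    mem_io_free_vm(struct mem_type_manager *man, struct buffer_object *bo)
    {
        if (!bo->reserved_vm)
            return;
        bo->reserved_vm = false;
        if (man->use_io_reserve_lru)
            TAILQ_REMOVE(&man->io_reserve_lru, bo, lru);
        /* ... driver io_mem_free hook would run here ... */
    }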
185 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, in ttm_mem_reg_ioremap() argument
188 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; in ttm_mem_reg_ioremap()
194 ret = ttm_mem_io_reserve(bdev, mem); in ttm_mem_reg_ioremap()
196 if (ret || !mem->bus.is_iomem) in ttm_mem_reg_ioremap()
199 if (mem->bus.addr) { in ttm_mem_reg_ioremap()
200 addr = mem->bus.addr; in ttm_mem_reg_ioremap()
202 addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset, in ttm_mem_reg_ioremap()
203 mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ? in ttm_mem_reg_ioremap()
207 ttm_mem_io_free(bdev, mem); in ttm_mem_reg_ioremap()
209 return -ENOMEM; in ttm_mem_reg_ioremap()
217 void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, in ttm_mem_reg_iounmap() argument
222 man = &bdev->man[mem->mem_type]; in ttm_mem_reg_iounmap()
224 if (virtual && mem->bus.addr == NULL) in ttm_mem_reg_iounmap()
225 pmap_unmapdev(virtual, mem->bus.size); in ttm_mem_reg_iounmap()
227 ttm_mem_io_free(bdev, mem); in ttm_mem_reg_iounmap()
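ttm_mem_reg_ioremap() maps the region's bus window with pmap_mapdev_attr(), write-combining when the placement carries TTM_PL_FLAG_WC and uncacheable otherwise, and reuses a driver-premapped bus.addr when one exists; ttm_mem_reg_iounmap() only tears down mappings it created itself. A sketch of that control flow, assuming hypothetical map_device()/unmap_device() stubs in place of the pmap calls and a simplified bus-placement struct:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical, simplified stand-in for the bus part of struct ttm_mem_reg. */
    struct bus_placement {
        void     *addr;     /* non-NULL if the driver pre-mapped the window */
        uint64_t  base;
        uint64_t  offset;
        size_t    size;
        bool      is_iomem;
    };

    /* Stubs standing in for pmap_mapdev_attr() and pmap_unmapdev(). */
    static void *
    map_device(uint64_t pa, size_t size, bool write_combining)
    {
        (void)pa; (void)write_combining;
        return (malloc(size));
    }

    static void
    unmap_device(void *va, size_t size)
    {
        (void)size;
        free(va);
    }

    /* Map the bus window for CPU access, preferring a driver-premapped address. */
    static int
    reg_ioremap(struct bus_placement *bus, bool want_wc, void **virtual)
    {
        *virtual = NULL;
        if (!bus->is_iomem)
            return (0);                 /* nothing to map; use the ttm pages */
        if (bus->addr != NULL) {
            *virtual = bus->addr;
            return (0);
        }
        *virtual = map_device(bus->base + bus->offset, bus->size, want_wc);
        return (*virtual == NULL ? -1 : 0);     /* the original returns -ENOMEM */
    }

    /* Only tear down a mapping that reg_ioremap() created itself. */
    static void
    reg_iounmap(struct bus_placement *bus, void *virtual)
    {
        if (virtual != NULL && bus->addr == NULL)
            unmap_device(virtual, bus->size);
    }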
249 vm_page_t d = ttm->pages[page]; in ttm_copy_io_ttm_page()
253 return -ENOMEM; in ttm_copy_io_ttm_page()
260 return -ENOMEM; in ttm_copy_io_ttm_page()
273 vm_page_t s = ttm->pages[page]; in ttm_copy_ttm_io_page()
277 return -ENOMEM; in ttm_copy_ttm_io_page()
282 return -ENOMEM; in ttm_copy_ttm_io_page()
295 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_move_memcpy()
296 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; in ttm_bo_move_memcpy()
297 struct ttm_tt *ttm = bo->ttm; in ttm_bo_move_memcpy()
298 struct ttm_mem_reg *old_mem = &bo->mem; in ttm_bo_move_memcpy()
320 if (ttm->state == tt_unpopulated) { in ttm_bo_move_memcpy()
321 ret = ttm->bdev->driver->ttm_tt_populate(ttm); in ttm_bo_move_memcpy()
333 if ((old_mem->mem_type == new_mem->mem_type) && in ttm_bo_move_memcpy()
334 (new_mem->start < old_mem->start + old_mem->size)) { in ttm_bo_move_memcpy()
335 dir = -1; in ttm_bo_move_memcpy()
336 add = new_mem->num_pages - 1; in ttm_bo_move_memcpy()
339 for (i = 0; i < new_mem->num_pages; ++i) { in ttm_bo_move_memcpy()
342 vm_memattr_t prot = ttm_io_prot(old_mem->placement); in ttm_bo_move_memcpy()
346 vm_memattr_t prot = ttm_io_prot(new_mem->placement); in ttm_bo_move_memcpy()
352 /* failing here, means keep old copy as-is */ in ttm_bo_move_memcpy()
361 new_mem->mm_node = NULL; in ttm_bo_move_memcpy()
363 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { in ttm_bo_move_memcpy()
366 bo->ttm = NULL; in ttm_bo_move_memcpy()
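ttm_bo_move_memcpy() copies page by page and picks the direction first: when old and new regions live in the same memory type and the new window starts inside the old one, it walks back-to-front (dir = -1, add = num_pages - 1) so overlapping pages are not overwritten before they are read, much like memmove(). A standalone sketch of that direction choice, using a plain byte buffer and a hypothetical 4-byte page size instead of ttm pages:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SZ 4   /* hypothetical tiny "page" so the demo stays readable */

    /*
     * Copy num_pages pages from old_start to new_start within one backing
     * store, walking backwards when the destination overlaps the source.
     */
    static void
    move_pages(char *store, long old_start, long new_start, long num_pages)
    {
        long i, page, add = 0, dir = 1;

        if (new_start < old_start + num_pages) {    /* destination overlaps source */
            dir = -1;
            add = num_pages - 1;                    /* start from the last page */
        }
        for (i = 0; i < num_pages; ++i) {
            page = i * dir + add;
            memcpy(store + (new_start + page) * PAGE_SZ,
                   store + (old_start + page) * PAGE_SZ, PAGE_SZ);
        }
    }

    int
    main(void)
    {
        char store[10 * PAGE_SZ] = "AAAABBBBCCCCDDDD";

        move_pages(store, 0, 1, 4);     /* overlapping move, one page forward */
        printf("%.20s\n", store);       /* prints AAAAAAAABBBBCCCCDDDD */
        return (0);
    }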
404 struct ttm_bo_device *bdev = bo->bdev; in ttm_buffer_object_transfer()
405 struct ttm_bo_driver *driver = bdev->driver; in ttm_buffer_object_transfer()
415 INIT_LIST_HEAD(&fbo->ddestroy); in ttm_buffer_object_transfer()
416 INIT_LIST_HEAD(&fbo->lru); in ttm_buffer_object_transfer()
417 INIT_LIST_HEAD(&fbo->swap); in ttm_buffer_object_transfer()
418 INIT_LIST_HEAD(&fbo->io_reserve_lru); in ttm_buffer_object_transfer()
419 fbo->vm_node = NULL; in ttm_buffer_object_transfer()
420 atomic_set(&fbo->cpu_writers, 0); in ttm_buffer_object_transfer()
422 mtx_lock(&bdev->fence_lock); in ttm_buffer_object_transfer()
423 if (bo->sync_obj) in ttm_buffer_object_transfer()
424 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); in ttm_buffer_object_transfer()
426 fbo->sync_obj = NULL; in ttm_buffer_object_transfer()
427 mtx_unlock(&bdev->fence_lock); in ttm_buffer_object_transfer()
428 refcount_init(&fbo->list_kref, 1); in ttm_buffer_object_transfer()
429 refcount_init(&fbo->kref, 1); in ttm_buffer_object_transfer()
430 fbo->destroy = &ttm_transfered_destroy; in ttm_buffer_object_transfer()
431 fbo->acc_size = 0; in ttm_buffer_object_transfer()
460 struct ttm_mem_reg *mem = &bo->mem; in ttm_bo_ioremap() local
462 if (bo->mem.bus.addr) { in ttm_bo_ioremap()
463 map->bo_kmap_type = ttm_bo_map_premapped; in ttm_bo_ioremap()
464 map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset); in ttm_bo_ioremap()
466 map->bo_kmap_type = ttm_bo_map_iomap; in ttm_bo_ioremap()
467 map->virtual = pmap_mapdev_attr(bo->mem.bus.base + in ttm_bo_ioremap()
468 bo->mem.bus.offset + offset, size, in ttm_bo_ioremap()
469 (mem->placement & TTM_PL_FLAG_WC) ? in ttm_bo_ioremap()
471 map->size = size; in ttm_bo_ioremap()
473 return (!map->virtual) ? -ENOMEM : 0; in ttm_bo_ioremap()
481 struct ttm_mem_reg *mem = &bo->mem; in ttm_bo_kmap_ttm() local
483 struct ttm_tt *ttm = bo->ttm; in ttm_bo_kmap_ttm()
488 if (ttm->state == tt_unpopulated) { in ttm_bo_kmap_ttm()
489 ret = ttm->bdev->driver->ttm_tt_populate(ttm); in ttm_bo_kmap_ttm()
494 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) { in ttm_bo_kmap_ttm()
500 map->bo_kmap_type = ttm_bo_map_kmap; in ttm_bo_kmap_ttm()
501 map->page = ttm->pages[start_page]; in ttm_bo_kmap_ttm()
502 map->sf = sf_buf_alloc(map->page, 0); in ttm_bo_kmap_ttm()
503 map->virtual = (void *)sf_buf_kva(map->sf); in ttm_bo_kmap_ttm()
509 prot = (mem->placement & TTM_PL_FLAG_CACHED) ? in ttm_bo_kmap_ttm()
510 VM_MEMATTR_DEFAULT : ttm_io_prot(mem->placement); in ttm_bo_kmap_ttm()
511 map->bo_kmap_type = ttm_bo_map_vmap; in ttm_bo_kmap_ttm()
512 map->num_pages = num_pages; in ttm_bo_kmap_ttm()
513 map->virtual = (void *)kva_alloc(num_pages * PAGE_SIZE); in ttm_bo_kmap_ttm()
514 if (map->virtual != NULL) { in ttm_bo_kmap_ttm()
517 pmap_page_set_memattr(ttm->pages[start_page + in ttm_bo_kmap_ttm()
520 pmap_qenter((vm_offset_t)map->virtual, in ttm_bo_kmap_ttm()
521 &ttm->pages[start_page], num_pages); in ttm_bo_kmap_ttm()
524 return (!map->virtual) ? -ENOMEM : 0; in ttm_bo_kmap_ttm()
532 &bo->bdev->man[bo->mem.mem_type]; in ttm_bo_kmap()
536 MPASS(list_empty(&bo->swap)); in ttm_bo_kmap()
537 map->virtual = NULL; in ttm_bo_kmap()
538 map->bo = bo; in ttm_bo_kmap()
539 if (num_pages > bo->num_pages) in ttm_bo_kmap()
540 return -EINVAL; in ttm_bo_kmap()
541 if (start_page > bo->num_pages) in ttm_bo_kmap()
542 return -EINVAL; in ttm_bo_kmap()
545 return -EPERM; in ttm_bo_kmap()
548 ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); in ttm_bo_kmap()
552 if (!bo->mem.bus.is_iomem) { in ttm_bo_kmap()
563 struct ttm_buffer_object *bo = map->bo; in ttm_bo_kunmap()
565 &bo->bdev->man[bo->mem.mem_type]; in ttm_bo_kunmap()
567 if (!map->virtual) in ttm_bo_kunmap()
569 switch (map->bo_kmap_type) { in ttm_bo_kunmap()
571 pmap_unmapdev(map->virtual, map->size); in ttm_bo_kunmap()
574 pmap_qremove((vm_offset_t)(map->virtual), map->num_pages); in ttm_bo_kunmap()
575 kva_free((vm_offset_t)map->virtual, in ttm_bo_kunmap()
576 map->num_pages * PAGE_SIZE); in ttm_bo_kunmap()
579 sf_buf_free(map->sf); in ttm_bo_kunmap()
587 ttm_mem_io_free(map->bo->bdev, &map->bo->mem); in ttm_bo_kunmap()
589 map->virtual = NULL; in ttm_bo_kunmap()
590 map->page = NULL; in ttm_bo_kunmap()
591 map->sf = NULL; in ttm_bo_kunmap()
600 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_move_accel_cleanup()
601 struct ttm_bo_driver *driver = bdev->driver; in ttm_bo_move_accel_cleanup()
602 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; in ttm_bo_move_accel_cleanup()
603 struct ttm_mem_reg *old_mem = &bo->mem; in ttm_bo_move_accel_cleanup()
608 mtx_lock(&bdev->fence_lock); in ttm_bo_move_accel_cleanup()
609 if (bo->sync_obj) { in ttm_bo_move_accel_cleanup()
610 tmp_obj = bo->sync_obj; in ttm_bo_move_accel_cleanup()
611 bo->sync_obj = NULL; in ttm_bo_move_accel_cleanup()
613 bo->sync_obj = driver->sync_obj_ref(sync_obj); in ttm_bo_move_accel_cleanup()
616 mtx_unlock(&bdev->fence_lock); in ttm_bo_move_accel_cleanup()
618 driver->sync_obj_unref(&tmp_obj); in ttm_bo_move_accel_cleanup()
622 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && in ttm_bo_move_accel_cleanup()
623 (bo->ttm != NULL)) { in ttm_bo_move_accel_cleanup()
624 ttm_tt_unbind(bo->ttm); in ttm_bo_move_accel_cleanup()
625 ttm_tt_destroy(bo->ttm); in ttm_bo_move_accel_cleanup()
626 bo->ttm = NULL; in ttm_bo_move_accel_cleanup()
638 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); in ttm_bo_move_accel_cleanup()
639 mtx_unlock(&bdev->fence_lock); in ttm_bo_move_accel_cleanup()
641 driver->sync_obj_unref(&tmp_obj); in ttm_bo_move_accel_cleanup()
653 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) in ttm_bo_move_accel_cleanup()
654 ghost_obj->ttm = NULL; in ttm_bo_move_accel_cleanup()
656 bo->ttm = NULL; in ttm_bo_move_accel_cleanup()
663 new_mem->mm_node = NULL; in ttm_bo_move_accel_cleanup()
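The ttm_bo_move_accel_cleanup() lines show the fence hand-off: the old sync object is detached and the new one referenced while bdev->fence_lock is held, but the old reference is dropped only after unlocking, so the release path never runs under the lock. A sketch of that pattern with a hypothetical refcounted fence; the real code goes through the driver's sync_obj_ref/sync_obj_unref hooks and an atomic reference count:

    #include <pthread.h>
    #include <stdlib.h>

    /* Hypothetical refcounted fence standing in for the driver sync object. */
    struct fence {
        int refs;
    };

    static struct fence *
    fence_ref(struct fence *f)
    {
        if (f != NULL)
            f->refs++;
        return (f);
    }

    static void
    fence_unref(struct fence **f)
    {
        if (*f != NULL && --(*f)->refs == 0)
            free(*f);
        *f = NULL;
    }

    struct buffer_object {
        pthread_mutex_t fence_lock;
        struct fence   *sync_obj;   /* fence the buffer is currently bound to */
    };

    /*
     * Swap in a reference to the new fence under the lock, but drop the old
     * reference only after unlocking, so its release never runs locked.
     */
    static void
    bo_attach_fence(struct buffer_object *bo, struct fence *sync_obj)
    {
        struct fence *tmp_obj = NULL;

        pthread_mutex_lock(&bo->fence_lock);
        if (bo->sync_obj != NULL) {
            tmp_obj = bo->sync_obj;
            bo->sync_obj = NULL;
        }
        bo->sync_obj = fence_ref(sync_obj);
        pthread_mutex_unlock(&bo->fence_lock);
        if (tmp_obj != NULL)
            fence_unref(&tmp_obj);
    }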