/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25592ffb21SWarner Losh * 26592ffb21SWarner Losh **************************************************************************/ 27592ffb21SWarner Losh /* 28592ffb21SWarner Losh * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 29592ffb21SWarner Losh */ 30592ffb21SWarner Losh /* 31592ffb21SWarner Losh * Copyright (c) 2013 The FreeBSD Foundation 32592ffb21SWarner Losh * All rights reserved. 33592ffb21SWarner Losh * 34592ffb21SWarner Losh * Portions of this software were developed by Konstantin Belousov 35592ffb21SWarner Losh * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation. 36592ffb21SWarner Losh */ 37592ffb21SWarner Losh 38592ffb21SWarner Losh #include <sys/cdefs.h> 39592ffb21SWarner Losh __FBSDID("$FreeBSD$"); 40592ffb21SWarner Losh 41592ffb21SWarner Losh #include "opt_vm.h" 42592ffb21SWarner Losh 43592ffb21SWarner Losh #include <dev/drm2/drmP.h> 44592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_module.h> 45592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_bo_driver.h> 46592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_placement.h> 47592ffb21SWarner Losh 48592ffb21SWarner Losh #include <vm/vm.h> 49592ffb21SWarner Losh #include <vm/vm_page.h> 50592ffb21SWarner Losh #include <vm/vm_pageout.h> 51592ffb21SWarner Losh 52592ffb21SWarner Losh #define TTM_BO_VM_NUM_PREFAULT 16 53592ffb21SWarner Losh 54592ffb21SWarner Losh RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb, 55592ffb21SWarner Losh ttm_bo_cmp_rb_tree_items); 56592ffb21SWarner Losh 57592ffb21SWarner Losh int 58592ffb21SWarner Losh ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a, 59592ffb21SWarner Losh struct ttm_buffer_object *b) 60592ffb21SWarner Losh { 61592ffb21SWarner Losh 62592ffb21SWarner Losh if (a->vm_node->start < b->vm_node->start) { 63592ffb21SWarner Losh return (-1); 64592ffb21SWarner Losh } else if (a->vm_node->start > b->vm_node->start) { 65592ffb21SWarner Losh return (1); 66592ffb21SWarner Losh } else { 67592ffb21SWarner Losh return (0); 68592ffb21SWarner 
Losh } 69592ffb21SWarner Losh } 70592ffb21SWarner Losh 71592ffb21SWarner Losh static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev, 72592ffb21SWarner Losh unsigned long page_start, 73592ffb21SWarner Losh unsigned long num_pages) 74592ffb21SWarner Losh { 75592ffb21SWarner Losh unsigned long cur_offset; 76592ffb21SWarner Losh struct ttm_buffer_object *bo; 77592ffb21SWarner Losh struct ttm_buffer_object *best_bo = NULL; 78592ffb21SWarner Losh 79592ffb21SWarner Losh bo = RB_ROOT(&bdev->addr_space_rb); 80592ffb21SWarner Losh while (bo != NULL) { 81592ffb21SWarner Losh cur_offset = bo->vm_node->start; 82592ffb21SWarner Losh if (page_start >= cur_offset) { 83592ffb21SWarner Losh best_bo = bo; 84592ffb21SWarner Losh if (page_start == cur_offset) 85592ffb21SWarner Losh break; 86592ffb21SWarner Losh bo = RB_RIGHT(bo, vm_rb); 87592ffb21SWarner Losh } else 88592ffb21SWarner Losh bo = RB_LEFT(bo, vm_rb); 89592ffb21SWarner Losh } 90592ffb21SWarner Losh 91592ffb21SWarner Losh if (unlikely(best_bo == NULL)) 92592ffb21SWarner Losh return NULL; 93592ffb21SWarner Losh 94592ffb21SWarner Losh if (unlikely((best_bo->vm_node->start + best_bo->num_pages) < 95592ffb21SWarner Losh (page_start + num_pages))) 96592ffb21SWarner Losh return NULL; 97592ffb21SWarner Losh 98592ffb21SWarner Losh return best_bo; 99592ffb21SWarner Losh } 100592ffb21SWarner Losh 101592ffb21SWarner Losh static int 102592ffb21SWarner Losh ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset, 103592ffb21SWarner Losh int prot, vm_page_t *mres) 104592ffb21SWarner Losh { 105592ffb21SWarner Losh 106592ffb21SWarner Losh struct ttm_buffer_object *bo = vm_obj->handle; 107592ffb21SWarner Losh struct ttm_bo_device *bdev = bo->bdev; 108592ffb21SWarner Losh struct ttm_tt *ttm = NULL; 109592ffb21SWarner Losh vm_page_t m, m1; 110592ffb21SWarner Losh int ret; 111592ffb21SWarner Losh int retval = VM_PAGER_OK; 112592ffb21SWarner Losh struct ttm_mem_type_manager *man = 113592ffb21SWarner Losh 
&bdev->man[bo->mem.mem_type]; 114592ffb21SWarner Losh 115592ffb21SWarner Losh vm_object_pip_add(vm_obj, 1); 116592ffb21SWarner Losh if (*mres != NULL) { 1170fd977b3SMark Johnston (void)vm_page_remove(*mres); 118592ffb21SWarner Losh } 119592ffb21SWarner Losh retry: 120592ffb21SWarner Losh VM_OBJECT_WUNLOCK(vm_obj); 121592ffb21SWarner Losh m = NULL; 122592ffb21SWarner Losh 123592ffb21SWarner Losh reserve: 124592ffb21SWarner Losh ret = ttm_bo_reserve(bo, false, false, false, 0); 125592ffb21SWarner Losh if (unlikely(ret != 0)) { 126592ffb21SWarner Losh if (ret == -EBUSY) { 127592ffb21SWarner Losh kern_yield(PRI_USER); 128592ffb21SWarner Losh goto reserve; 129592ffb21SWarner Losh } 130592ffb21SWarner Losh } 131592ffb21SWarner Losh 132592ffb21SWarner Losh if (bdev->driver->fault_reserve_notify) { 133592ffb21SWarner Losh ret = bdev->driver->fault_reserve_notify(bo); 134592ffb21SWarner Losh switch (ret) { 135592ffb21SWarner Losh case 0: 136592ffb21SWarner Losh break; 137592ffb21SWarner Losh case -EBUSY: 138592ffb21SWarner Losh case -ERESTARTSYS: 139592ffb21SWarner Losh case -EINTR: 140592ffb21SWarner Losh kern_yield(PRI_USER); 141592ffb21SWarner Losh goto reserve; 142592ffb21SWarner Losh default: 143592ffb21SWarner Losh retval = VM_PAGER_ERROR; 144592ffb21SWarner Losh goto out_unlock; 145592ffb21SWarner Losh } 146592ffb21SWarner Losh } 147592ffb21SWarner Losh 148592ffb21SWarner Losh /* 149592ffb21SWarner Losh * Wait for buffer data in transit, due to a pipelined 150592ffb21SWarner Losh * move. 151592ffb21SWarner Losh */ 152592ffb21SWarner Losh 153592ffb21SWarner Losh mtx_lock(&bdev->fence_lock); 154592ffb21SWarner Losh if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { 155592ffb21SWarner Losh /* 156592ffb21SWarner Losh * Here, the behavior differs between Linux and FreeBSD. 157592ffb21SWarner Losh * 158592ffb21SWarner Losh * On Linux, the wait is interruptible (3rd argument to 159592ffb21SWarner Losh * ttm_bo_wait). 
There must be some mechanism to resume 160592ffb21SWarner Losh * page fault handling, once the signal is processed. 161592ffb21SWarner Losh * 162592ffb21SWarner Losh * On FreeBSD, the wait is uninteruptible. This is not a 163592ffb21SWarner Losh * problem as we can't end up with an unkillable process 164592ffb21SWarner Losh * here, because the wait will eventually time out. 165592ffb21SWarner Losh * 166592ffb21SWarner Losh * An example of this situation is the Xorg process 167592ffb21SWarner Losh * which uses SIGALRM internally. The signal could 168592ffb21SWarner Losh * interrupt the wait, causing the page fault to fail 169592ffb21SWarner Losh * and the process to receive SIGSEGV. 170592ffb21SWarner Losh */ 171592ffb21SWarner Losh ret = ttm_bo_wait(bo, false, false, false); 172592ffb21SWarner Losh mtx_unlock(&bdev->fence_lock); 173592ffb21SWarner Losh if (unlikely(ret != 0)) { 174592ffb21SWarner Losh retval = VM_PAGER_ERROR; 175592ffb21SWarner Losh goto out_unlock; 176592ffb21SWarner Losh } 177592ffb21SWarner Losh } else 178592ffb21SWarner Losh mtx_unlock(&bdev->fence_lock); 179592ffb21SWarner Losh 180592ffb21SWarner Losh ret = ttm_mem_io_lock(man, true); 181592ffb21SWarner Losh if (unlikely(ret != 0)) { 182592ffb21SWarner Losh retval = VM_PAGER_ERROR; 183592ffb21SWarner Losh goto out_unlock; 184592ffb21SWarner Losh } 185592ffb21SWarner Losh ret = ttm_mem_io_reserve_vm(bo); 186592ffb21SWarner Losh if (unlikely(ret != 0)) { 187592ffb21SWarner Losh retval = VM_PAGER_ERROR; 188592ffb21SWarner Losh goto out_io_unlock; 189592ffb21SWarner Losh } 190592ffb21SWarner Losh 191592ffb21SWarner Losh /* 192592ffb21SWarner Losh * Strictly, we're not allowed to modify vma->vm_page_prot here, 193592ffb21SWarner Losh * since the mmap_sem is only held in read mode. 
However, we 194592ffb21SWarner Losh * modify only the caching bits of vma->vm_page_prot and 195592ffb21SWarner Losh * consider those bits protected by 196592ffb21SWarner Losh * the bo->mutex, as we should be the only writers. 197592ffb21SWarner Losh * There shouldn't really be any readers of these bits except 198592ffb21SWarner Losh * within vm_insert_mixed()? fork? 199592ffb21SWarner Losh * 200592ffb21SWarner Losh * TODO: Add a list of vmas to the bo, and change the 201592ffb21SWarner Losh * vma->vm_page_prot when the object changes caching policy, with 202592ffb21SWarner Losh * the correct locks held. 203592ffb21SWarner Losh */ 204592ffb21SWarner Losh if (!bo->mem.bus.is_iomem) { 205592ffb21SWarner Losh /* Allocate all page at once, most common usage */ 206592ffb21SWarner Losh ttm = bo->ttm; 207592ffb21SWarner Losh if (ttm->bdev->driver->ttm_tt_populate(ttm)) { 208592ffb21SWarner Losh retval = VM_PAGER_ERROR; 209592ffb21SWarner Losh goto out_io_unlock; 210592ffb21SWarner Losh } 211592ffb21SWarner Losh } 212592ffb21SWarner Losh 213592ffb21SWarner Losh if (bo->mem.bus.is_iomem) { 214592ffb21SWarner Losh m = PHYS_TO_VM_PAGE(bo->mem.bus.base + bo->mem.bus.offset + 215592ffb21SWarner Losh offset); 216592ffb21SWarner Losh KASSERT((m->flags & PG_FICTITIOUS) != 0, 217592ffb21SWarner Losh ("physical address %#jx not fictitious", 218592ffb21SWarner Losh (uintmax_t)(bo->mem.bus.base + bo->mem.bus.offset 219592ffb21SWarner Losh + offset))); 220592ffb21SWarner Losh pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement)); 221592ffb21SWarner Losh } else { 222592ffb21SWarner Losh ttm = bo->ttm; 223592ffb21SWarner Losh m = ttm->pages[OFF_TO_IDX(offset)]; 224592ffb21SWarner Losh if (unlikely(!m)) { 225592ffb21SWarner Losh retval = VM_PAGER_ERROR; 226592ffb21SWarner Losh goto out_io_unlock; 227592ffb21SWarner Losh } 228592ffb21SWarner Losh pmap_page_set_memattr(m, 229592ffb21SWarner Losh (bo->mem.placement & TTM_PL_FLAG_CACHED) ? 
230592ffb21SWarner Losh VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement)); 231592ffb21SWarner Losh } 232592ffb21SWarner Losh 233592ffb21SWarner Losh VM_OBJECT_WLOCK(vm_obj); 234*c7575748SJeff Roberson if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0) { 235592ffb21SWarner Losh ttm_mem_io_unlock(man); 236592ffb21SWarner Losh ttm_bo_unreserve(bo); 237592ffb21SWarner Losh goto retry; 238592ffb21SWarner Losh } 239592ffb21SWarner Losh m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset)); 240592ffb21SWarner Losh if (m1 == NULL) { 241592ffb21SWarner Losh if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) { 242*c7575748SJeff Roberson vm_page_xunbusy(m); 243592ffb21SWarner Losh VM_OBJECT_WUNLOCK(vm_obj); 244592ffb21SWarner Losh vm_wait(vm_obj); 245592ffb21SWarner Losh VM_OBJECT_WLOCK(vm_obj); 246592ffb21SWarner Losh ttm_mem_io_unlock(man); 247592ffb21SWarner Losh ttm_bo_unreserve(bo); 248592ffb21SWarner Losh goto retry; 249592ffb21SWarner Losh } 250592ffb21SWarner Losh } else { 251592ffb21SWarner Losh KASSERT(m == m1, 252592ffb21SWarner Losh ("inconsistent insert bo %p m %p m1 %p offset %jx", 253592ffb21SWarner Losh bo, m, m1, (uintmax_t)offset)); 254592ffb21SWarner Losh } 255592ffb21SWarner Losh m->valid = VM_PAGE_BITS_ALL; 256592ffb21SWarner Losh if (*mres != NULL) { 257592ffb21SWarner Losh KASSERT(*mres != m, ("losing %p %p", *mres, m)); 258592ffb21SWarner Losh vm_page_free(*mres); 259592ffb21SWarner Losh } 260592ffb21SWarner Losh *mres = m; 261592ffb21SWarner Losh 262592ffb21SWarner Losh out_io_unlock1: 263592ffb21SWarner Losh ttm_mem_io_unlock(man); 264592ffb21SWarner Losh out_unlock1: 265592ffb21SWarner Losh ttm_bo_unreserve(bo); 266592ffb21SWarner Losh vm_object_pip_wakeup(vm_obj); 267592ffb21SWarner Losh return (retval); 268592ffb21SWarner Losh 269592ffb21SWarner Losh out_io_unlock: 270592ffb21SWarner Losh VM_OBJECT_WLOCK(vm_obj); 271592ffb21SWarner Losh goto out_io_unlock1; 272592ffb21SWarner Losh 273592ffb21SWarner Losh out_unlock: 274592ffb21SWarner Losh 
VM_OBJECT_WLOCK(vm_obj); 275592ffb21SWarner Losh goto out_unlock1; 276592ffb21SWarner Losh } 277592ffb21SWarner Losh 278592ffb21SWarner Losh static int 279592ffb21SWarner Losh ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 280592ffb21SWarner Losh vm_ooffset_t foff, struct ucred *cred, u_short *color) 281592ffb21SWarner Losh { 282592ffb21SWarner Losh 283592ffb21SWarner Losh /* 284592ffb21SWarner Losh * On Linux, a reference to the buffer object is acquired here. 285592ffb21SWarner Losh * The reason is that this function is not called when the 286592ffb21SWarner Losh * mmap() is initialized, but only when a process forks for 287592ffb21SWarner Losh * instance. Therefore on Linux, the reference on the bo is 288592ffb21SWarner Losh * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's 289592ffb21SWarner Losh * then released in ttm_bo_vm_close(). 290592ffb21SWarner Losh * 291592ffb21SWarner Losh * Here, this function is called during mmap() initialization. 292592ffb21SWarner Losh * Thus, the reference acquired in ttm_bo_mmap_single() is 293592ffb21SWarner Losh * sufficient. 
294592ffb21SWarner Losh */ 295592ffb21SWarner Losh 296592ffb21SWarner Losh *color = 0; 297592ffb21SWarner Losh return (0); 298592ffb21SWarner Losh } 299592ffb21SWarner Losh 300592ffb21SWarner Losh static void 301592ffb21SWarner Losh ttm_bo_vm_dtor(void *handle) 302592ffb21SWarner Losh { 303592ffb21SWarner Losh struct ttm_buffer_object *bo = handle; 304592ffb21SWarner Losh 305592ffb21SWarner Losh ttm_bo_unref(&bo); 306592ffb21SWarner Losh } 307592ffb21SWarner Losh 308592ffb21SWarner Losh static struct cdev_pager_ops ttm_pager_ops = { 309592ffb21SWarner Losh .cdev_pg_fault = ttm_bo_vm_fault, 310592ffb21SWarner Losh .cdev_pg_ctor = ttm_bo_vm_ctor, 311592ffb21SWarner Losh .cdev_pg_dtor = ttm_bo_vm_dtor 312592ffb21SWarner Losh }; 313592ffb21SWarner Losh 314592ffb21SWarner Losh int 315592ffb21SWarner Losh ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size, 316592ffb21SWarner Losh struct vm_object **obj_res, int nprot) 317592ffb21SWarner Losh { 318592ffb21SWarner Losh struct ttm_bo_driver *driver; 319592ffb21SWarner Losh struct ttm_buffer_object *bo; 320592ffb21SWarner Losh struct vm_object *vm_obj; 321592ffb21SWarner Losh int ret; 322592ffb21SWarner Losh 323592ffb21SWarner Losh rw_wlock(&bdev->vm_lock); 324592ffb21SWarner Losh bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size)); 325592ffb21SWarner Losh if (likely(bo != NULL)) 326592ffb21SWarner Losh refcount_acquire(&bo->kref); 327592ffb21SWarner Losh rw_wunlock(&bdev->vm_lock); 328592ffb21SWarner Losh 329592ffb21SWarner Losh if (unlikely(bo == NULL)) { 330592ffb21SWarner Losh printf("[TTM] Could not find buffer object to map\n"); 331592ffb21SWarner Losh return (-EINVAL); 332592ffb21SWarner Losh } 333592ffb21SWarner Losh 334592ffb21SWarner Losh driver = bo->bdev->driver; 335592ffb21SWarner Losh if (unlikely(!driver->verify_access)) { 336592ffb21SWarner Losh ret = -EPERM; 337592ffb21SWarner Losh goto out_unref; 338592ffb21SWarner Losh } 339592ffb21SWarner Losh ret = 
driver->verify_access(bo); 340592ffb21SWarner Losh if (unlikely(ret != 0)) 341592ffb21SWarner Losh goto out_unref; 342592ffb21SWarner Losh 343592ffb21SWarner Losh vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops, 344592ffb21SWarner Losh size, nprot, 0, curthread->td_ucred); 345592ffb21SWarner Losh if (vm_obj == NULL) { 346592ffb21SWarner Losh ret = -EINVAL; 347592ffb21SWarner Losh goto out_unref; 348592ffb21SWarner Losh } 349592ffb21SWarner Losh /* 350592ffb21SWarner Losh * Note: We're transferring the bo reference to vm_obj->handle here. 351592ffb21SWarner Losh */ 352592ffb21SWarner Losh *offset = 0; 353592ffb21SWarner Losh *obj_res = vm_obj; 354592ffb21SWarner Losh return 0; 355592ffb21SWarner Losh out_unref: 356592ffb21SWarner Losh ttm_bo_unref(&bo); 357592ffb21SWarner Losh return ret; 358592ffb21SWarner Losh } 359592ffb21SWarner Losh 360592ffb21SWarner Losh void 361592ffb21SWarner Losh ttm_bo_release_mmap(struct ttm_buffer_object *bo) 362592ffb21SWarner Losh { 363592ffb21SWarner Losh vm_object_t vm_obj; 364592ffb21SWarner Losh vm_page_t m; 365592ffb21SWarner Losh int i; 366592ffb21SWarner Losh 367592ffb21SWarner Losh vm_obj = cdev_pager_lookup(bo); 368592ffb21SWarner Losh if (vm_obj == NULL) 369592ffb21SWarner Losh return; 370592ffb21SWarner Losh 371592ffb21SWarner Losh VM_OBJECT_WLOCK(vm_obj); 372592ffb21SWarner Losh retry: 373592ffb21SWarner Losh for (i = 0; i < bo->num_pages; i++) { 374592ffb21SWarner Losh m = vm_page_lookup(vm_obj, i); 375592ffb21SWarner Losh if (m == NULL) 376592ffb21SWarner Losh continue; 377*c7575748SJeff Roberson if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0) 378592ffb21SWarner Losh goto retry; 379592ffb21SWarner Losh cdev_pager_free_page(vm_obj, m); 380592ffb21SWarner Losh } 381592ffb21SWarner Losh VM_OBJECT_WUNLOCK(vm_obj); 382592ffb21SWarner Losh 383592ffb21SWarner Losh vm_object_deallocate(vm_obj); 384592ffb21SWarner Losh } 385592ffb21SWarner Losh 386592ffb21SWarner Losh #if 0 387592ffb21SWarner Losh int 
ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) 388592ffb21SWarner Losh { 389592ffb21SWarner Losh if (vma->vm_pgoff != 0) 390592ffb21SWarner Losh return -EACCES; 391592ffb21SWarner Losh 392592ffb21SWarner Losh vma->vm_ops = &ttm_bo_vm_ops; 393592ffb21SWarner Losh vma->vm_private_data = ttm_bo_reference(bo); 394592ffb21SWarner Losh vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND; 395592ffb21SWarner Losh return 0; 396592ffb21SWarner Losh } 397592ffb21SWarner Losh 398592ffb21SWarner Losh ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, 399592ffb21SWarner Losh const char __user *wbuf, char __user *rbuf, size_t count, 400592ffb21SWarner Losh loff_t *f_pos, bool write) 401592ffb21SWarner Losh { 402592ffb21SWarner Losh struct ttm_buffer_object *bo; 403592ffb21SWarner Losh struct ttm_bo_driver *driver; 404592ffb21SWarner Losh struct ttm_bo_kmap_obj map; 405592ffb21SWarner Losh unsigned long dev_offset = (*f_pos >> PAGE_SHIFT); 406592ffb21SWarner Losh unsigned long kmap_offset; 407592ffb21SWarner Losh unsigned long kmap_end; 408592ffb21SWarner Losh unsigned long kmap_num; 409592ffb21SWarner Losh size_t io_size; 410592ffb21SWarner Losh unsigned int page_offset; 411592ffb21SWarner Losh char *virtual; 412592ffb21SWarner Losh int ret; 413592ffb21SWarner Losh bool no_wait = false; 414592ffb21SWarner Losh bool dummy; 415592ffb21SWarner Losh 416592ffb21SWarner Losh read_lock(&bdev->vm_lock); 417592ffb21SWarner Losh bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1); 418592ffb21SWarner Losh if (likely(bo != NULL)) 419592ffb21SWarner Losh ttm_bo_reference(bo); 420592ffb21SWarner Losh read_unlock(&bdev->vm_lock); 421592ffb21SWarner Losh 422592ffb21SWarner Losh if (unlikely(bo == NULL)) 423592ffb21SWarner Losh return -EFAULT; 424592ffb21SWarner Losh 425592ffb21SWarner Losh driver = bo->bdev->driver; 426592ffb21SWarner Losh if (unlikely(!driver->verify_access)) { 427592ffb21SWarner Losh ret = -EPERM; 428592ffb21SWarner Losh goto out_unref; 
429592ffb21SWarner Losh } 430592ffb21SWarner Losh 431592ffb21SWarner Losh ret = driver->verify_access(bo, filp); 432592ffb21SWarner Losh if (unlikely(ret != 0)) 433592ffb21SWarner Losh goto out_unref; 434592ffb21SWarner Losh 435592ffb21SWarner Losh kmap_offset = dev_offset - bo->vm_node->start; 436592ffb21SWarner Losh if (unlikely(kmap_offset >= bo->num_pages)) { 437592ffb21SWarner Losh ret = -EFBIG; 438592ffb21SWarner Losh goto out_unref; 439592ffb21SWarner Losh } 440592ffb21SWarner Losh 441592ffb21SWarner Losh page_offset = *f_pos & ~PAGE_MASK; 442592ffb21SWarner Losh io_size = bo->num_pages - kmap_offset; 443592ffb21SWarner Losh io_size = (io_size << PAGE_SHIFT) - page_offset; 444592ffb21SWarner Losh if (count < io_size) 445592ffb21SWarner Losh io_size = count; 446592ffb21SWarner Losh 447592ffb21SWarner Losh kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT; 448592ffb21SWarner Losh kmap_num = kmap_end - kmap_offset + 1; 449592ffb21SWarner Losh 450592ffb21SWarner Losh ret = ttm_bo_reserve(bo, true, no_wait, false, 0); 451592ffb21SWarner Losh 452592ffb21SWarner Losh switch (ret) { 453592ffb21SWarner Losh case 0: 454592ffb21SWarner Losh break; 455592ffb21SWarner Losh case -EBUSY: 456592ffb21SWarner Losh ret = -EAGAIN; 457592ffb21SWarner Losh goto out_unref; 458592ffb21SWarner Losh default: 459592ffb21SWarner Losh goto out_unref; 460592ffb21SWarner Losh } 461592ffb21SWarner Losh 462592ffb21SWarner Losh ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); 463592ffb21SWarner Losh if (unlikely(ret != 0)) { 464592ffb21SWarner Losh ttm_bo_unreserve(bo); 465592ffb21SWarner Losh goto out_unref; 466592ffb21SWarner Losh } 467592ffb21SWarner Losh 468592ffb21SWarner Losh virtual = ttm_kmap_obj_virtual(&map, &dummy); 469592ffb21SWarner Losh virtual += page_offset; 470592ffb21SWarner Losh 471592ffb21SWarner Losh if (write) 472592ffb21SWarner Losh ret = copy_from_user(virtual, wbuf, io_size); 473592ffb21SWarner Losh else 474592ffb21SWarner Losh ret = copy_to_user(rbuf, virtual, 
io_size); 475592ffb21SWarner Losh 476592ffb21SWarner Losh ttm_bo_kunmap(&map); 477592ffb21SWarner Losh ttm_bo_unreserve(bo); 478592ffb21SWarner Losh ttm_bo_unref(&bo); 479592ffb21SWarner Losh 480592ffb21SWarner Losh if (unlikely(ret != 0)) 481592ffb21SWarner Losh return -EFBIG; 482592ffb21SWarner Losh 483592ffb21SWarner Losh *f_pos += io_size; 484592ffb21SWarner Losh 485592ffb21SWarner Losh return io_size; 486592ffb21SWarner Losh out_unref: 487592ffb21SWarner Losh ttm_bo_unref(&bo); 488592ffb21SWarner Losh return ret; 489592ffb21SWarner Losh } 490592ffb21SWarner Losh 491592ffb21SWarner Losh ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, 492592ffb21SWarner Losh char __user *rbuf, size_t count, loff_t *f_pos, 493592ffb21SWarner Losh bool write) 494592ffb21SWarner Losh { 495592ffb21SWarner Losh struct ttm_bo_kmap_obj map; 496592ffb21SWarner Losh unsigned long kmap_offset; 497592ffb21SWarner Losh unsigned long kmap_end; 498592ffb21SWarner Losh unsigned long kmap_num; 499592ffb21SWarner Losh size_t io_size; 500592ffb21SWarner Losh unsigned int page_offset; 501592ffb21SWarner Losh char *virtual; 502592ffb21SWarner Losh int ret; 503592ffb21SWarner Losh bool no_wait = false; 504592ffb21SWarner Losh bool dummy; 505592ffb21SWarner Losh 506592ffb21SWarner Losh kmap_offset = (*f_pos >> PAGE_SHIFT); 507592ffb21SWarner Losh if (unlikely(kmap_offset >= bo->num_pages)) 508592ffb21SWarner Losh return -EFBIG; 509592ffb21SWarner Losh 510592ffb21SWarner Losh page_offset = *f_pos & ~PAGE_MASK; 511592ffb21SWarner Losh io_size = bo->num_pages - kmap_offset; 512592ffb21SWarner Losh io_size = (io_size << PAGE_SHIFT) - page_offset; 513592ffb21SWarner Losh if (count < io_size) 514592ffb21SWarner Losh io_size = count; 515592ffb21SWarner Losh 516592ffb21SWarner Losh kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT; 517592ffb21SWarner Losh kmap_num = kmap_end - kmap_offset + 1; 518592ffb21SWarner Losh 519592ffb21SWarner Losh ret = ttm_bo_reserve(bo, true, no_wait, 
false, 0); 520592ffb21SWarner Losh 521592ffb21SWarner Losh switch (ret) { 522592ffb21SWarner Losh case 0: 523592ffb21SWarner Losh break; 524592ffb21SWarner Losh case -EBUSY: 525592ffb21SWarner Losh return -EAGAIN; 526592ffb21SWarner Losh default: 527592ffb21SWarner Losh return ret; 528592ffb21SWarner Losh } 529592ffb21SWarner Losh 530592ffb21SWarner Losh ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); 531592ffb21SWarner Losh if (unlikely(ret != 0)) { 532592ffb21SWarner Losh ttm_bo_unreserve(bo); 533592ffb21SWarner Losh return ret; 534592ffb21SWarner Losh } 535592ffb21SWarner Losh 536592ffb21SWarner Losh virtual = ttm_kmap_obj_virtual(&map, &dummy); 537592ffb21SWarner Losh virtual += page_offset; 538592ffb21SWarner Losh 539592ffb21SWarner Losh if (write) 540592ffb21SWarner Losh ret = copy_from_user(virtual, wbuf, io_size); 541592ffb21SWarner Losh else 542592ffb21SWarner Losh ret = copy_to_user(rbuf, virtual, io_size); 543592ffb21SWarner Losh 544592ffb21SWarner Losh ttm_bo_kunmap(&map); 545592ffb21SWarner Losh ttm_bo_unreserve(bo); 546592ffb21SWarner Losh ttm_bo_unref(&bo); 547592ffb21SWarner Losh 548592ffb21SWarner Losh if (unlikely(ret != 0)) 549592ffb21SWarner Losh return ret; 550592ffb21SWarner Losh 551592ffb21SWarner Losh *f_pos += io_size; 552592ffb21SWarner Losh 553592ffb21SWarner Losh return io_size; 554592ffb21SWarner Losh } 555592ffb21SWarner Losh #endif 556