1592ffb21SWarner Losh /**************************************************************************
2592ffb21SWarner Losh *
3592ffb21SWarner Losh * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4592ffb21SWarner Losh * All Rights Reserved.
5592ffb21SWarner Losh *
6592ffb21SWarner Losh * Permission is hereby granted, free of charge, to any person obtaining a
7592ffb21SWarner Losh * copy of this software and associated documentation files (the
8592ffb21SWarner Losh * "Software"), to deal in the Software without restriction, including
9592ffb21SWarner Losh * without limitation the rights to use, copy, modify, merge, publish,
10592ffb21SWarner Losh * distribute, sub license, and/or sell copies of the Software, and to
11592ffb21SWarner Losh * permit persons to whom the Software is furnished to do so, subject to
12592ffb21SWarner Losh * the following conditions:
13592ffb21SWarner Losh *
14592ffb21SWarner Losh * The above copyright notice and this permission notice (including the
15592ffb21SWarner Losh * next paragraph) shall be included in all copies or substantial portions
16592ffb21SWarner Losh * of the Software.
17592ffb21SWarner Losh *
18592ffb21SWarner Losh * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19592ffb21SWarner Losh * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20592ffb21SWarner Losh * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21592ffb21SWarner Losh * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22592ffb21SWarner Losh * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23592ffb21SWarner Losh * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24592ffb21SWarner Losh * USE OR OTHER DEALINGS IN THE SOFTWARE.
25592ffb21SWarner Losh *
26592ffb21SWarner Losh **************************************************************************/
27592ffb21SWarner Losh /*
28592ffb21SWarner Losh * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29592ffb21SWarner Losh */
30592ffb21SWarner Losh /*
31592ffb21SWarner Losh * Copyright (c) 2013 The FreeBSD Foundation
32592ffb21SWarner Losh * All rights reserved.
33592ffb21SWarner Losh *
34592ffb21SWarner Losh * Portions of this software were developed by Konstantin Belousov
35592ffb21SWarner Losh * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
36592ffb21SWarner Losh */
37592ffb21SWarner Losh
38592ffb21SWarner Losh #include <sys/cdefs.h>
39592ffb21SWarner Losh #include "opt_vm.h"
40592ffb21SWarner Losh
41592ffb21SWarner Losh #include <dev/drm2/drmP.h>
42592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_module.h>
43592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_bo_driver.h>
44592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_placement.h>
45592ffb21SWarner Losh
46592ffb21SWarner Losh #include <vm/vm.h>
47592ffb21SWarner Losh #include <vm/vm_page.h>
48592ffb21SWarner Losh #include <vm/vm_pageout.h>
49592ffb21SWarner Losh
50592ffb21SWarner Losh #define TTM_BO_VM_NUM_PREFAULT 16
51592ffb21SWarner Losh
/*
 * Generate the red-black tree implementation for the per-device tree of
 * buffer objects (bdev->addr_space_rb), linked through the bo's vm_rb
 * entry and ordered by ttm_bo_cmp_rb_tree_items().
 */
RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);
54592ffb21SWarner Losh
55592ffb21SWarner Losh int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object * a,struct ttm_buffer_object * b)56592ffb21SWarner Losh ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
57592ffb21SWarner Losh struct ttm_buffer_object *b)
58592ffb21SWarner Losh {
59592ffb21SWarner Losh
60592ffb21SWarner Losh if (a->vm_node->start < b->vm_node->start) {
61592ffb21SWarner Losh return (-1);
62592ffb21SWarner Losh } else if (a->vm_node->start > b->vm_node->start) {
63592ffb21SWarner Losh return (1);
64592ffb21SWarner Losh } else {
65592ffb21SWarner Losh return (0);
66592ffb21SWarner Losh }
67592ffb21SWarner Losh }
68592ffb21SWarner Losh
ttm_bo_vm_lookup_rb(struct ttm_bo_device * bdev,unsigned long page_start,unsigned long num_pages)69592ffb21SWarner Losh static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
70592ffb21SWarner Losh unsigned long page_start,
71592ffb21SWarner Losh unsigned long num_pages)
72592ffb21SWarner Losh {
73592ffb21SWarner Losh unsigned long cur_offset;
74592ffb21SWarner Losh struct ttm_buffer_object *bo;
75592ffb21SWarner Losh struct ttm_buffer_object *best_bo = NULL;
76592ffb21SWarner Losh
77592ffb21SWarner Losh bo = RB_ROOT(&bdev->addr_space_rb);
78592ffb21SWarner Losh while (bo != NULL) {
79592ffb21SWarner Losh cur_offset = bo->vm_node->start;
80592ffb21SWarner Losh if (page_start >= cur_offset) {
81592ffb21SWarner Losh best_bo = bo;
82592ffb21SWarner Losh if (page_start == cur_offset)
83592ffb21SWarner Losh break;
84592ffb21SWarner Losh bo = RB_RIGHT(bo, vm_rb);
85592ffb21SWarner Losh } else
86592ffb21SWarner Losh bo = RB_LEFT(bo, vm_rb);
87592ffb21SWarner Losh }
88592ffb21SWarner Losh
89592ffb21SWarner Losh if (unlikely(best_bo == NULL))
90592ffb21SWarner Losh return NULL;
91592ffb21SWarner Losh
92592ffb21SWarner Losh if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
93592ffb21SWarner Losh (page_start + num_pages)))
94592ffb21SWarner Losh return NULL;
95592ffb21SWarner Losh
96592ffb21SWarner Losh return best_bo;
97592ffb21SWarner Losh }
98592ffb21SWarner Losh
/*
 * Pager fault handler (cdev_pg_fault) for TTM buffer-object mappings.
 *
 * Resolves a fault at byte offset "offset" within vm_obj to the backing
 * vm_page_t: a fictitious device page for iomem placements, or the
 * corresponding ttm->pages[] entry otherwise.  On success the page is
 * busied, inserted into the object, marked valid, and returned via
 * *mres; the function returns VM_PAGER_OK, or VM_PAGER_ERROR on
 * failure.
 *
 * Entered and exited with the object write-locked; the lock is dropped
 * while reserving the bo and waiting for pipelined moves, so the
 * "retry" path restarts the whole sequence.
 */
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	if (*mres != NULL) {
		/* The page passed in is replaced below; detach it now. */
		(void)vm_page_remove(*mres);
	}
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	/* Busy-wait (with yields) for the bo reservation. */
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			kern_yield(PRI_USER);
			goto reserve;
		}
		/*
		 * NOTE(review): a non-zero, non-EBUSY result falls through
		 * and the fault proceeds without a reservation — confirm
		 * ttm_bo_reserve() cannot return other errors when called
		 * with interruptible and no_wait both false.
		 */
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
		case -EINTR:
			kern_yield(PRI_USER);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	mtx_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninteruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all page at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		/* Device memory: resolve to the fictitious page for the PA. */
		m = PHYS_TO_VM_PAGE(bo->mem.bus.base + bo->mem.bus.offset +
		    offset);
		KASSERT((m->flags & PG_FICTITIOUS) != 0,
		    ("physical address %#jx not fictitious",
		    (uintmax_t)(bo->mem.bus.base + bo->mem.bus.offset
		    + offset)));
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		/* Set the cache attribute matching the bo's placement. */
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_WLOCK(vm_obj);
	if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0) {
		/* Could not busy the page; drop everything and refault. */
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	/* XXX This looks like it should just be vm_page_replace? */
	if (m1 == NULL) {
		if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
			/* Insert failed (low memory); wait and refault. */
			vm_page_xunbusy(m);
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(vm_obj);
			VM_OBJECT_WLOCK(vm_obj);
			ttm_mem_io_unlock(man);
			ttm_bo_unreserve(bo);
			goto retry;
		}
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	vm_page_valid(m);
	if (*mres != NULL) {
		/* Release the placeholder page the pager handed us. */
		KASSERT(*mres != m, ("losing %p %p", *mres, m));
		vm_page_xunbusy(*mres);
		vm_page_free(*mres);
	}
	*mres = m;

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

	/* Error exits: reacquire the object lock before unwinding. */
out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}
277592ffb21SWarner Losh
278592ffb21SWarner Losh static int
ttm_bo_vm_ctor(void * handle,vm_ooffset_t size,vm_prot_t prot,vm_ooffset_t foff,struct ucred * cred,u_short * color)279592ffb21SWarner Losh ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
280592ffb21SWarner Losh vm_ooffset_t foff, struct ucred *cred, u_short *color)
281592ffb21SWarner Losh {
282592ffb21SWarner Losh
283592ffb21SWarner Losh /*
284592ffb21SWarner Losh * On Linux, a reference to the buffer object is acquired here.
285592ffb21SWarner Losh * The reason is that this function is not called when the
286592ffb21SWarner Losh * mmap() is initialized, but only when a process forks for
287592ffb21SWarner Losh * instance. Therefore on Linux, the reference on the bo is
288592ffb21SWarner Losh * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
289592ffb21SWarner Losh * then released in ttm_bo_vm_close().
290592ffb21SWarner Losh *
291592ffb21SWarner Losh * Here, this function is called during mmap() initialization.
292592ffb21SWarner Losh * Thus, the reference acquired in ttm_bo_mmap_single() is
293592ffb21SWarner Losh * sufficient.
294592ffb21SWarner Losh */
295592ffb21SWarner Losh
296592ffb21SWarner Losh *color = 0;
297592ffb21SWarner Losh return (0);
298592ffb21SWarner Losh }
299592ffb21SWarner Losh
/*
 * Pager destructor (cdev_pg_dtor): drop the bo reference that was
 * transferred to the VM object's handle at mmap time.
 */
static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo;

	bo = handle;
	ttm_bo_unref(&bo);
}
307592ffb21SWarner Losh
/* cdev pager methods backing TTM buffer-object mappings. */
static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};
313592ffb21SWarner Losh
314592ffb21SWarner Losh int
ttm_bo_mmap_single(struct ttm_bo_device * bdev,vm_ooffset_t * offset,vm_size_t size,struct vm_object ** obj_res,int nprot)315592ffb21SWarner Losh ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
316592ffb21SWarner Losh struct vm_object **obj_res, int nprot)
317592ffb21SWarner Losh {
318592ffb21SWarner Losh struct ttm_bo_driver *driver;
319592ffb21SWarner Losh struct ttm_buffer_object *bo;
320592ffb21SWarner Losh struct vm_object *vm_obj;
321592ffb21SWarner Losh int ret;
322592ffb21SWarner Losh
323592ffb21SWarner Losh rw_wlock(&bdev->vm_lock);
324592ffb21SWarner Losh bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
325592ffb21SWarner Losh if (likely(bo != NULL))
326592ffb21SWarner Losh refcount_acquire(&bo->kref);
327592ffb21SWarner Losh rw_wunlock(&bdev->vm_lock);
328592ffb21SWarner Losh
329592ffb21SWarner Losh if (unlikely(bo == NULL)) {
330592ffb21SWarner Losh printf("[TTM] Could not find buffer object to map\n");
331592ffb21SWarner Losh return (-EINVAL);
332592ffb21SWarner Losh }
333592ffb21SWarner Losh
334592ffb21SWarner Losh driver = bo->bdev->driver;
335592ffb21SWarner Losh if (unlikely(!driver->verify_access)) {
336592ffb21SWarner Losh ret = -EPERM;
337592ffb21SWarner Losh goto out_unref;
338592ffb21SWarner Losh }
339592ffb21SWarner Losh ret = driver->verify_access(bo);
340592ffb21SWarner Losh if (unlikely(ret != 0))
341592ffb21SWarner Losh goto out_unref;
342592ffb21SWarner Losh
343592ffb21SWarner Losh vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
344592ffb21SWarner Losh size, nprot, 0, curthread->td_ucred);
345592ffb21SWarner Losh if (vm_obj == NULL) {
346592ffb21SWarner Losh ret = -EINVAL;
347592ffb21SWarner Losh goto out_unref;
348592ffb21SWarner Losh }
349592ffb21SWarner Losh /*
350592ffb21SWarner Losh * Note: We're transferring the bo reference to vm_obj->handle here.
351592ffb21SWarner Losh */
352592ffb21SWarner Losh *offset = 0;
353592ffb21SWarner Losh *obj_res = vm_obj;
354592ffb21SWarner Losh return 0;
355592ffb21SWarner Losh out_unref:
356592ffb21SWarner Losh ttm_bo_unref(&bo);
357592ffb21SWarner Losh return ret;
358592ffb21SWarner Losh }
359592ffb21SWarner Losh
360592ffb21SWarner Losh void
ttm_bo_release_mmap(struct ttm_buffer_object * bo)361592ffb21SWarner Losh ttm_bo_release_mmap(struct ttm_buffer_object *bo)
362592ffb21SWarner Losh {
363592ffb21SWarner Losh vm_object_t vm_obj;
364592ffb21SWarner Losh
365592ffb21SWarner Losh vm_obj = cdev_pager_lookup(bo);
366*38e3125dSDoug Moore if (vm_obj != NULL) {
367*38e3125dSDoug Moore cdev_mgtdev_pager_free_pages(vm_obj);
368592ffb21SWarner Losh vm_object_deallocate(vm_obj);
369592ffb21SWarner Losh }
370*38e3125dSDoug Moore }
371592ffb21SWarner Losh
#if 0
/*
 * Compiled-out code carried over from the Linux TTM sources; it depends
 * on Linux VM types (vm_area_struct) and the Linux-only ttm_bo_vm_ops,
 * which have no FreeBSD counterparts here.
 */
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
383592ffb21SWarner Losh
/*
 * Compiled-out (#if 0) Linux-derived helper: read from or write to a
 * buffer object found by device offset, bounded by the bo's size, via a
 * temporary kernel mapping and copy_{from,to}_user.  Kept for reference
 * only; uses Linux locking and uaccess primitives.
 */
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	/* Resolve the device offset to a bo and take a reference. */
	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	/* Page index of the start position, relative to the bo. */
	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	/* Clamp the transfer to what remains of the bo past *f_pos. */
	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	/* Map the affected pages into kernel space for the copy. */
	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
476592ffb21SWarner Losh
/*
 * Compiled-out (#if 0) Linux-derived helper: read/write within a given
 * buffer object (fbdev path), same mechanism as ttm_bo_io() but with
 * the bo supplied by the caller instead of looked up by offset.
 */
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	/* Page index of the start position within the bo. */
	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	/* Clamp the transfer to the remainder of the bo. */
	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	/* Map the affected pages into kernel space for the copy. */
	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
#endif
542