xref: /freebsd/sys/dev/drm2/ttm/ttm_bo_vm.c (revision 592ffb217505586a6c69e91549a3c14132875f16)
1*592ffb21SWarner Losh /**************************************************************************
2*592ffb21SWarner Losh  *
3*592ffb21SWarner Losh  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4*592ffb21SWarner Losh  * All Rights Reserved.
5*592ffb21SWarner Losh  *
6*592ffb21SWarner Losh  * Permission is hereby granted, free of charge, to any person obtaining a
7*592ffb21SWarner Losh  * copy of this software and associated documentation files (the
8*592ffb21SWarner Losh  * "Software"), to deal in the Software without restriction, including
9*592ffb21SWarner Losh  * without limitation the rights to use, copy, modify, merge, publish,
10*592ffb21SWarner Losh  * distribute, sub license, and/or sell copies of the Software, and to
11*592ffb21SWarner Losh  * permit persons to whom the Software is furnished to do so, subject to
12*592ffb21SWarner Losh  * the following conditions:
13*592ffb21SWarner Losh  *
14*592ffb21SWarner Losh  * The above copyright notice and this permission notice (including the
15*592ffb21SWarner Losh  * next paragraph) shall be included in all copies or substantial portions
16*592ffb21SWarner Losh  * of the Software.
17*592ffb21SWarner Losh  *
18*592ffb21SWarner Losh  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19*592ffb21SWarner Losh  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20*592ffb21SWarner Losh  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21*592ffb21SWarner Losh  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22*592ffb21SWarner Losh  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23*592ffb21SWarner Losh  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24*592ffb21SWarner Losh  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25*592ffb21SWarner Losh  *
26*592ffb21SWarner Losh  **************************************************************************/
27*592ffb21SWarner Losh /*
28*592ffb21SWarner Losh  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29*592ffb21SWarner Losh  */
30*592ffb21SWarner Losh /*
31*592ffb21SWarner Losh  * Copyright (c) 2013 The FreeBSD Foundation
32*592ffb21SWarner Losh  * All rights reserved.
33*592ffb21SWarner Losh  *
34*592ffb21SWarner Losh  * Portions of this software were developed by Konstantin Belousov
35*592ffb21SWarner Losh  * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
36*592ffb21SWarner Losh  */
37*592ffb21SWarner Losh 
38*592ffb21SWarner Losh #include <sys/cdefs.h>
39*592ffb21SWarner Losh __FBSDID("$FreeBSD$");
40*592ffb21SWarner Losh 
41*592ffb21SWarner Losh #include "opt_vm.h"
42*592ffb21SWarner Losh 
43*592ffb21SWarner Losh #include <dev/drm2/drmP.h>
44*592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_module.h>
45*592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_bo_driver.h>
46*592ffb21SWarner Losh #include <dev/drm2/ttm/ttm_placement.h>
47*592ffb21SWarner Losh 
48*592ffb21SWarner Losh #include <vm/vm.h>
49*592ffb21SWarner Losh #include <vm/vm_page.h>
50*592ffb21SWarner Losh #include <vm/vm_pageout.h>
51*592ffb21SWarner Losh 
52*592ffb21SWarner Losh #define TTM_BO_VM_NUM_PREFAULT 16
53*592ffb21SWarner Losh 
/*
 * Instantiate the red-black tree operations used to index buffer objects
 * by the start offset of their VM node; ordering is supplied by
 * ttm_bo_cmp_rb_tree_items().
 */
RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);
56*592ffb21SWarner Losh 
57*592ffb21SWarner Losh int
58*592ffb21SWarner Losh ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
59*592ffb21SWarner Losh     struct ttm_buffer_object *b)
60*592ffb21SWarner Losh {
61*592ffb21SWarner Losh 
62*592ffb21SWarner Losh 	if (a->vm_node->start < b->vm_node->start) {
63*592ffb21SWarner Losh 		return (-1);
64*592ffb21SWarner Losh 	} else if (a->vm_node->start > b->vm_node->start) {
65*592ffb21SWarner Losh 		return (1);
66*592ffb21SWarner Losh 	} else {
67*592ffb21SWarner Losh 		return (0);
68*592ffb21SWarner Losh 	}
69*592ffb21SWarner Losh }
70*592ffb21SWarner Losh 
71*592ffb21SWarner Losh static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
72*592ffb21SWarner Losh 						     unsigned long page_start,
73*592ffb21SWarner Losh 						     unsigned long num_pages)
74*592ffb21SWarner Losh {
75*592ffb21SWarner Losh 	unsigned long cur_offset;
76*592ffb21SWarner Losh 	struct ttm_buffer_object *bo;
77*592ffb21SWarner Losh 	struct ttm_buffer_object *best_bo = NULL;
78*592ffb21SWarner Losh 
79*592ffb21SWarner Losh 	bo = RB_ROOT(&bdev->addr_space_rb);
80*592ffb21SWarner Losh 	while (bo != NULL) {
81*592ffb21SWarner Losh 		cur_offset = bo->vm_node->start;
82*592ffb21SWarner Losh 		if (page_start >= cur_offset) {
83*592ffb21SWarner Losh 			best_bo = bo;
84*592ffb21SWarner Losh 			if (page_start == cur_offset)
85*592ffb21SWarner Losh 				break;
86*592ffb21SWarner Losh 			bo = RB_RIGHT(bo, vm_rb);
87*592ffb21SWarner Losh 		} else
88*592ffb21SWarner Losh 			bo = RB_LEFT(bo, vm_rb);
89*592ffb21SWarner Losh 	}
90*592ffb21SWarner Losh 
91*592ffb21SWarner Losh 	if (unlikely(best_bo == NULL))
92*592ffb21SWarner Losh 		return NULL;
93*592ffb21SWarner Losh 
94*592ffb21SWarner Losh 	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
95*592ffb21SWarner Losh 		     (page_start + num_pages)))
96*592ffb21SWarner Losh 		return NULL;
97*592ffb21SWarner Losh 
98*592ffb21SWarner Losh 	return best_bo;
99*592ffb21SWarner Losh }
100*592ffb21SWarner Losh 
/*
 * cdev pager fault handler for TTM buffer object mappings.
 *
 * Called with the VM object write-locked.  Reserves the buffer object,
 * waits out any pipelined move, resolves the faulting offset to a
 * vm_page (either a fictitious page for iomem or a ttm-backed page),
 * and inserts it into the VM object.  Returns a VM_PAGER_* code and
 * hands the busied page back through *mres.
 *
 * Lock order note: the object lock is dropped before reserving the bo
 * and re-taken before touching the object's page queue; both contention
 * paths (busy page, failed insert) fully unwind and restart at "retry".
 */
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	/*
	 * Detach the page the pager pre-allocated for us (if any); it is
	 * freed at the end once we have a replacement to return.
	 */
	if (*mres != NULL) {
		vm_page_lock(*mres);
		vm_page_remove(*mres);
		vm_page_unlock(*mres);
	}
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			/* Contended: yield and try to reserve again. */
			kern_yield(PRI_USER);
			goto reserve;
		}
		/*
		 * NOTE(review): errors other than -EBUSY fall through with
		 * the bo apparently unreserved; presumably they cannot occur
		 * with these arguments (no interruptible/sequenced reserve)
		 * -- confirm before relying on it.
		 */
	}

	/* Give the driver a chance to fault in/relocate the buffer. */
	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
		case -EINTR:
			/* Transient: yield and redo the reservation. */
			kern_yield(PRI_USER);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	mtx_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninteruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	/* Serialize against io-memory teardown and map the bus space. */
	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all page at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		/* Device memory: use the fictitious page for the bus address. */
		m = PHYS_TO_VM_PAGE(bo->mem.bus.base + bo->mem.bus.offset +
		    offset);
		KASSERT((m->flags & PG_FICTITIOUS) != 0,
		    ("physical address %#jx not fictitious",
		    (uintmax_t)(bo->mem.bus.base + bo->mem.bus.offset
		    + offset)));
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		/* System memory: take the populated ttm page for this index. */
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_WLOCK(vm_obj);
	if (vm_page_busied(m)) {
		/* Page busy elsewhere: sleep, drop everything, restart. */
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(vm_obj);
		vm_page_busy_sleep(m, "ttmpbs", false);
		VM_OBJECT_WLOCK(vm_obj);
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
			/* Insert failed for lack of memory: wait and restart. */
			VM_OBJECT_WUNLOCK(vm_obj);
			vm_wait(vm_obj);
			VM_OBJECT_WLOCK(vm_obj);
			ttm_mem_io_unlock(man);
			ttm_bo_unreserve(bo);
			goto retry;
		}
	} else {
		/* Already resident: it must be the very page we computed. */
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	m->valid = VM_PAGE_BITS_ALL;
	vm_page_xbusy(m);
	/* Free the placeholder page we detached at entry, return ours. */
	if (*mres != NULL) {
		KASSERT(*mres != m, ("losing %p %p", *mres, m));
		vm_page_lock(*mres);
		vm_page_free(*mres);
		vm_page_unlock(*mres);
	}
	*mres = m;

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

	/*
	 * Error exits: re-take the object lock (the caller expects it
	 * held) before running the common unwind above.
	 */
out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}
285*592ffb21SWarner Losh 
286*592ffb21SWarner Losh static int
287*592ffb21SWarner Losh ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
288*592ffb21SWarner Losh     vm_ooffset_t foff, struct ucred *cred, u_short *color)
289*592ffb21SWarner Losh {
290*592ffb21SWarner Losh 
291*592ffb21SWarner Losh 	/*
292*592ffb21SWarner Losh 	 * On Linux, a reference to the buffer object is acquired here.
293*592ffb21SWarner Losh 	 * The reason is that this function is not called when the
294*592ffb21SWarner Losh 	 * mmap() is initialized, but only when a process forks for
295*592ffb21SWarner Losh 	 * instance. Therefore on Linux, the reference on the bo is
296*592ffb21SWarner Losh 	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
297*592ffb21SWarner Losh 	 * then released in ttm_bo_vm_close().
298*592ffb21SWarner Losh 	 *
299*592ffb21SWarner Losh 	 * Here, this function is called during mmap() initialization.
300*592ffb21SWarner Losh 	 * Thus, the reference acquired in ttm_bo_mmap_single() is
301*592ffb21SWarner Losh 	 * sufficient.
302*592ffb21SWarner Losh 	 */
303*592ffb21SWarner Losh 
304*592ffb21SWarner Losh 	*color = 0;
305*592ffb21SWarner Losh 	return (0);
306*592ffb21SWarner Losh }
307*592ffb21SWarner Losh 
/*
 * cdev pager destructor: drop the buffer object reference that was
 * transferred to the VM object handle in ttm_bo_mmap_single().
 */
static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo;

	bo = handle;
	ttm_bo_unref(&bo);
}
315*592ffb21SWarner Losh 
/*
 * Pager operations wired into the cdev pager for TTM buffer object
 * mappings: page fault resolution plus VM object construction and
 * destruction.
 */
static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};
321*592ffb21SWarner Losh 
322*592ffb21SWarner Losh int
323*592ffb21SWarner Losh ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
324*592ffb21SWarner Losh     struct vm_object **obj_res, int nprot)
325*592ffb21SWarner Losh {
326*592ffb21SWarner Losh 	struct ttm_bo_driver *driver;
327*592ffb21SWarner Losh 	struct ttm_buffer_object *bo;
328*592ffb21SWarner Losh 	struct vm_object *vm_obj;
329*592ffb21SWarner Losh 	int ret;
330*592ffb21SWarner Losh 
331*592ffb21SWarner Losh 	rw_wlock(&bdev->vm_lock);
332*592ffb21SWarner Losh 	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
333*592ffb21SWarner Losh 	if (likely(bo != NULL))
334*592ffb21SWarner Losh 		refcount_acquire(&bo->kref);
335*592ffb21SWarner Losh 	rw_wunlock(&bdev->vm_lock);
336*592ffb21SWarner Losh 
337*592ffb21SWarner Losh 	if (unlikely(bo == NULL)) {
338*592ffb21SWarner Losh 		printf("[TTM] Could not find buffer object to map\n");
339*592ffb21SWarner Losh 		return (-EINVAL);
340*592ffb21SWarner Losh 	}
341*592ffb21SWarner Losh 
342*592ffb21SWarner Losh 	driver = bo->bdev->driver;
343*592ffb21SWarner Losh 	if (unlikely(!driver->verify_access)) {
344*592ffb21SWarner Losh 		ret = -EPERM;
345*592ffb21SWarner Losh 		goto out_unref;
346*592ffb21SWarner Losh 	}
347*592ffb21SWarner Losh 	ret = driver->verify_access(bo);
348*592ffb21SWarner Losh 	if (unlikely(ret != 0))
349*592ffb21SWarner Losh 		goto out_unref;
350*592ffb21SWarner Losh 
351*592ffb21SWarner Losh 	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
352*592ffb21SWarner Losh 	    size, nprot, 0, curthread->td_ucred);
353*592ffb21SWarner Losh 	if (vm_obj == NULL) {
354*592ffb21SWarner Losh 		ret = -EINVAL;
355*592ffb21SWarner Losh 		goto out_unref;
356*592ffb21SWarner Losh 	}
357*592ffb21SWarner Losh 	/*
358*592ffb21SWarner Losh 	 * Note: We're transferring the bo reference to vm_obj->handle here.
359*592ffb21SWarner Losh 	 */
360*592ffb21SWarner Losh 	*offset = 0;
361*592ffb21SWarner Losh 	*obj_res = vm_obj;
362*592ffb21SWarner Losh 	return 0;
363*592ffb21SWarner Losh out_unref:
364*592ffb21SWarner Losh 	ttm_bo_unref(&bo);
365*592ffb21SWarner Losh 	return ret;
366*592ffb21SWarner Losh }
367*592ffb21SWarner Losh 
/*
 * Tear down the VM-side mapping state of a buffer object: remove every
 * resident page from its cdev pager object and drop the object
 * reference obtained from the lookup.  No-op if the bo has no pager
 * object.
 */
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_WLOCK(vm_obj);
retry:
	/*
	 * Note: sleeping on a busy page restarts the scan from index 0
	 * (the for-init is re-run by the goto), since pages may have
	 * changed while the object lock was dropped.
	 */
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup(vm_obj, i);
		if (m == NULL)
			continue;
		if (vm_page_sleep_if_busy(m, "ttm_unm"))
			goto retry;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}
393*592ffb21SWarner Losh 
#if 0
/*
 * NOTE(review): the functions below are carried over from the Linux TTM
 * code and are compiled out on FreeBSD; they still use Linux-only
 * interfaces (vm_area_struct, read_lock, copy_from_user/copy_to_user)
 * and would need porting before being enabled.
 */
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
#endif
564