xref: /freebsd/sys/dev/drm2/ttm/ttm_bo_vm.c (revision 595e514d0df2bac5b813d35f83e32875dbf16a83)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

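/*
 * Number of pages the Linux version of this fault handler prefaults;
 * retained here although the FreeBSD fault path below resolves a single
 * page per fault and does not use it.
 */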
#define TTM_BO_VM_NUM_PREFAULT 16

RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

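/*
 * Comparator for the red-black tree generated above: buffer objects are
 * ordered by the starting page offset of their VM node within the device
 * address space.
 */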
int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{

	if (a->vm_node->start < b->vm_node->start) {
		return (-1);
	} else if (a->vm_node->start > b->vm_node->start) {
		return (1);
	} else {
		return (0);
	}
}

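/*
 * Find the buffer object whose VM node covers the page range
 * [page_start, page_start + num_pages).  Walks the address-space tree
 * for the object with the largest start offset not exceeding page_start
 * and returns NULL when no object fully contains the range.
 */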
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	RB_FOREACH(bo, ttm_bo_device_buffer_objects, &bdev->addr_space_rb) {
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		}
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

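/*
 * OBJT_MGTDEVICE pager fault handler.  Called by the VM system with the
 * object write-locked to resolve one page of the buffer object at the
 * given offset; returns a VM_PAGER_* status and stores the resident page
 * in *mres on success.
 */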
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	}
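	/*
	 * Restart point: the object lock is dropped while the BO is
	 * reserved and the backing page is looked up; if that page turns
	 * out to be busy, everything is unwound and the fault retried.
	 */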
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			kern_yield(0);
			goto reserve;
		}
	}

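	/*
	 * Give the driver a chance to make the BO mappable, e.g. by
	 * moving it into a CPU-visible aperture, before its pages are
	 * handed to the VM system.
	 */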
	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTART:
		case -EINTR:
			kern_yield(0);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	mtx_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode.  However, we
	 * modify only the caching bits of vma->vm_page_prot and consider
	 * those bits protected by the bo->mutex, as we should be the
	 * only writers.  There shouldn't really be any readers of these
	 * bits except within vm_insert_mixed()?  fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 *
	 * (Comment retained from the Linux original; on FreeBSD the
	 * caching attributes are applied per page below via
	 * pmap_page_set_memattr().)
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, the most common usage. */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
		    bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

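	/*
	 * Reacquire the object lock; if another thread holds the page
	 * busy, back out completely and retry the fault from scratch.
	 */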
	VM_OBJECT_WLOCK(vm_obj);
	if ((m->oflags & VPO_BUSY) != 0) {
		vm_page_sleep(m, "ttmpbs");
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	vm_page_lock(m);
	vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	vm_page_unlock(m);
	vm_page_busy(m);

	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}

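	/*
	 * Exit paths.  The "1" labels are reached with the VM object
	 * lock held; out_io_unlock and out_unlock are used from contexts
	 * where it was dropped and therefore reacquire it first.
	 */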
out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}

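/*
 * Pager constructor and destructor, invoked when the VM object backed
 * by these operations is created and when its last reference is
 * dropped, respectively.
 */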
static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * We don't acquire a reference on bo->kref here, because it was
	 * already done in ttm_bo_mmap_single().
	 */

	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

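/*
 * Back a single-object mmap() request: look up the buffer object that
 * covers the requested range, verify access with the driver, and wrap
 * the object in an OBJT_MGTDEVICE pager whose handle owns the BO
 * reference taken here.
 */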
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	rw_wlock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		refcount_acquire(&bo->kref);
	rw_wunlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printf("[TTM] Could not find buffer object to map\n");
		return (EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = EPERM;
		goto out_unref;
	}
	ret = -driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);
	if (vm_obj == NULL) {
		ret = EINVAL;
		goto out_unref;
	}
	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return (0);
out_unref:
	ttm_bo_unref(&bo);
	return (ret);
}

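/*
 * The code below is the unported Linux implementation of fbdev mmap and
 * kernel buffer-object I/O, retained verbatim for reference; it relies
 * on Linux VM and uaccess interfaces and is compiled out.
 */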
#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
#endif
494