xref: /freebsd/sys/dev/drm2/ttm/ttm_bo_vm.c (revision ab0b9f6b3073e6c4d1dfbf07444d7db67a189a96)
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

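/*
 * Number of extra pages the Linux fault handler maps in around a fault.
 * The constant is retained from the upstream code but is not referenced
 * in this file; the FreeBSD fault handler inserts a single page at a time.
 */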
#define TTM_BO_VM_NUM_PREFAULT 16

RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

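/*
 * Order buffer objects in the RB-tree by the starting page offset of
 * their address-space node, the key used by the lookup below.
 */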
int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{

	if (a->vm_node->start < b->vm_node->start) {
		return (-1);
	} else if (a->vm_node->start > b->vm_node->start) {
		return (1);
	} else {
		return (0);
	}
}

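/*
 * Look up the buffer object whose address-space node contains the whole
 * page range [page_start, page_start + num_pages).  The tree walk keeps
 * the last node that starts at or below page_start; the final check
 * rejects candidates too small to cover the requested range.
 */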
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	bo = RB_ROOT(&bdev->addr_space_rb);
	while (bo != NULL) {
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			best_bo = bo;
			if (page_start == cur_offset)
				break;
			bo = RB_RIGHT(bo, vm_rb);
		} else
			bo = RB_LEFT(bo, vm_rb);
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

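/*
 * Page fault handler for a mapped buffer object, installed as the
 * cdev_pg_fault method of the OBJT_MGTDEVICE pager.  Entered (and exited)
 * with the VM object write-locked; the lock is dropped around the
 * potentially sleeping TTM operations and the fault restarted as needed.
 */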
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
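	/*
	 * If the pager asks us to replace an existing page, detach the
	 * old page from the object now; it is freed once the new page
	 * has been installed in *mres.
	 */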
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			kern_yield(0);
			goto reserve;
		}
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTART:
		case -EINTR:
			kern_yield(0);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	mtx_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of why an interruptible wait would be a
		 * problem is the Xorg process, which uses SIGALRM
		 * internally. The signal could interrupt the wait,
		 * causing the page fault to fail and the process to
		 * receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and consider
	 * those bits protected by the bo->mutex, as we should be the
	 * only writers.  There shouldn't really be any readers of these
	 * bits except within vm_insert_mixed()?  fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

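	/*
	 * Translate the fault offset into a vm_page.  For an iomem
	 * placement this is the fictitious page overlaying the aperture;
	 * otherwise it is taken from the ttm page array.  Either way the
	 * caching attributes requested by the placement are applied.
	 */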
	if (bo->mem.bus.is_iomem) {
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
		    bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_WLOCK(vm_obj);
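	/*
	 * Another thread holds the page busy: wait for it to be
	 * released, back out the TTM locks and restart the fault so
	 * that the page is re-looked-up from scratch.
	 */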
	if (vm_page_busied(m)) {
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(vm_obj);
		vm_page_busy_sleep(m, "ttmpbs");
		VM_OBJECT_WLOCK(vm_obj);
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
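	/*
	 * Install the page into the VM object, unless an earlier retry
	 * already did so.  Insertion can fail under memory pressure;
	 * wait for the page daemon to reclaim memory and restart.
	 */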
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_WAIT;
			VM_OBJECT_WLOCK(vm_obj);
			ttm_mem_io_unlock(man);
			ttm_bo_unreserve(bo);
			goto retry;
		}
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	vm_page_xbusy(m);

	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() initialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */

	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

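/*
 * Pager methods for the OBJT_MGTDEVICE objects created in
 * ttm_bo_mmap_single() below.
 */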
static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

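/*
 * Implement mmap() of a buffer object: look the object up by its
 * address-space offset, let the driver verify access rights, and wrap
 * the object in an OBJT_MGTDEVICE pager.  The reference acquired here
 * travels with the VM object and is dropped in ttm_bo_vm_dtor().
 */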
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	rw_wlock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		refcount_acquire(&bo->kref);
	rw_wunlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printf("[TTM] Could not find buffer object to map\n");
		return (EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = EPERM;
		goto out_unref;
	}
	ret = -driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);
	if (vm_obj == NULL) {
		ret = EINVAL;
		goto out_unref;
	}
	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return (0);
out_unref:
	ttm_bo_unref(&bo);
	return (ret);
}

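/*
 * Remove all pages from the VM object backing a buffer object, thereby
 * invalidating any existing user mappings of it, and drop the reference
 * returned by cdev_pager_lookup().
 */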
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_WLOCK(vm_obj);
retry:
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup(vm_obj, i);
		if (m == NULL)
			continue;
		if (vm_page_sleep_if_busy(m, "ttm_unm"))
			goto retry;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}

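/*
 * The helpers below are carried over, disabled, from the upstream Linux
 * TTM code; they depend on Linux interfaces (struct vm_area_struct,
 * copy_to_user()/copy_from_user()) that this port does not provide.
 */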
#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
#endif
563